//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/ProfileList.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // User cannot attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // User cannot attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation || TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not its definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro), we need to try to find
    // the comment at the location of the expansion, but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return BaseLocation first, so that the expansion
    // site is examined first; the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site (this
    // can happen if the name's spelling is provided via a macro argument), and
    // always refer to the declaration itself.
    Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
  }

  return Locations;
}

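/// Descriptive note (editor-added doc comment describing the logic below):
/// search \p CommentsInTheFile for a doc comment attachable to \p D, anchored
/// at \p RepresentativeLocForDecl. First look for a trailing comment that
/// starts on the same line as the declaration; otherwise fall back to the
/// closest preceding non-trailing doc comment, provided nothing that looks
/// like another declaration or a preprocessor directive sits between the
/// comment and the declaration.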
RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that the Doxygen trailing comment comes after the declaration,
      // starts on the same line, and is in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // comment and declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D, const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalid.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    return CommentlessRedeclChains.lookup(CanonicalD);
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    if (DeclRawComments.count(D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, DeclLoc, *CommentsInThisFile)) {
        cacheRawCommentForDecl(*D, *DocComment);
        comments::FullComment *FC = DocComment->parse(*this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(), ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D, const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    } else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    } else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    } else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    } else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl. This is
  // important because comments can contain references to parameter names which
  // can be different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

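/// Descriptive note (editor-added doc comment describing the logic below):
/// profile a template template parameter for canonicalization. The profile
/// covers its depth, position, pack-ness, and the kind and shape of each of
/// its template parameters, tagging each parameter kind with a distinct
/// integer (0: type, 1: non-type, 2: template template) so that parameter
/// lists differing only in parameter kind do not collide.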
void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
         const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
       AEnd = DeclAttrs.end(); A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << " " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                          \
  if (counts[Idx])                                                  \
    llvm::errs() << " " << counts[Idx] << " " << #Name              \
                 << " types, " << sizeof(Name##Type) << " each "    \
                 << "(" << counts[Idx] * sizeof(Name##Type)         \
                 << " bytes)\n";                                    \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                   \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should set named module for ASTContext for only once");
  CurrentCXXNamedModule = M;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  // Placeholder type for OpenACC array sections.
  if (LangOpts.OpenACC) {
    // FIXME: Once we implement OpenACC array sections in Sema, this will either
    // be combined with the OpenMP type, or given its own type. In the meantime,
    // just use the OpenMP type so that parsing can work.
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                                SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(UUD);
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(UUD);
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Inst);
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}

void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}

ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

void ASTContext::getOverriddenMethods(
    const NamedDecl *D, SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
  Overridden.append(OverDecls.begin(), OverDecls.end());
}

void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
  assert(!Import->getNextLocalImport() &&
         "Import declaration already in the chain");
  assert(!Import->isFromASTFile() && "Non-local import declaration");
  if (!FirstLocalImport) {
    FirstLocalImport = Import;
    LastLocalImport = Import;
    return;
  }

  LastLocalImport->setNextLocalImport(Import);
  LastLocalImport = Import;
}

//===----------------------------------------------------------------------===//
//                         Type Sizing and Analysis
//===----------------------------------------------------------------------===//

/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
/// scalar floating point type.
const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
  switch (T->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a floating point type!");
  case BuiltinType::BFloat16:
    return Target->getBFloat16Format();
  case BuiltinType::Float16:
    return Target->getHalfFormat();
  case BuiltinType::Half:
    // For HLSL, when the native half type is disabled, half will be treated
    // as float.
    if (getLangOpts().HLSL)
      if (getLangOpts().NativeHalfType)
        return Target->getHalfFormat();
      else
        return Target->getFloatFormat();
    else
      return Target->getHalfFormat();
  case BuiltinType::Float:     return Target->getFloatFormat();
  case BuiltinType::Double:    return Target->getDoubleFormat();
  case BuiltinType::Ibm128:
    return Target->getIbm128Format();
  case BuiltinType::LongDouble:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getLongDoubleFormat();
    return Target->getLongDoubleFormat();
  case BuiltinType::Float128:
    if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
      return AuxTarget->getFloat128Format();
    return Target->getFloat128Format();
  }
}

CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
  unsigned Align = Target->getCharWidth();

  const unsigned AlignFromAttr = D->getMaxAlignment();
  if (AlignFromAttr)
    Align = AlignFromAttr;

  // __attribute__((aligned)) can increase or decrease alignment
  // *except* on a struct or struct member, where it only increases
  // alignment unless 'packed' is also specified.
  //
  // It is an error for alignas to decrease alignment, so we can
  // ignore that possibility; Sema should diagnose it.
  bool UseAlignAttrOnly;
  if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
    UseAlignAttrOnly =
        FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
  else
    UseAlignAttrOnly = AlignFromAttr != 0;
  // If we're using the align attribute only, just ignore everything
  // else about the declaration and its type.
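  // Illustration of the rule above (not from the original source): for a
  // packed field such as
  //   struct S { char c; int i __attribute__((packed, aligned(2))); };
  // only the attribute alignment (2 bytes here) is used for 'i', ignoring
  // int's natural alignment; without 'packed', the attribute could only
  // raise the field's alignment.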
  if (UseAlignAttrOnly) {
    // do nothing
  } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
    QualType T = VD->getType();
    if (const auto *RT = T->getAs<ReferenceType>()) {
      if (ForAlignof)
        T = RT->getPointeeType();
      else
        T = getPointerType(RT->getPointeeType());
    }
    QualType BaseT = getBaseElementType(T);
    if (T->isFunctionType())
      Align = getTypeInfoImpl(T.getTypePtr()).Align;
    else if (!BaseT->isIncompleteType()) {
      // Adjust alignments of declarations with array type by the
      // large-array alignment on the target.
      if (const ArrayType *arrayType = getAsArrayType(T)) {
        unsigned MinWidth = Target->getLargeArrayMinWidth();
        if (!ForAlignof && MinWidth) {
          if (isa<VariableArrayType>(arrayType))
            Align = std::max(Align, Target->getLargeArrayAlign());
          else if (isa<ConstantArrayType>(arrayType) &&
                   MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
            Align = std::max(Align, Target->getLargeArrayAlign());
        }
      }
      Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
      if (BaseT.getQualifiers().hasUnaligned())
        Align = Target->getCharWidth();
    }

    // Ensure minimum alignment for global variables.
    if (const auto *VD = dyn_cast<VarDecl>(D))
      if (VD->hasGlobalStorage() && !ForAlignof) {
        uint64_t TypeSize =
            !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
        Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
      }

    // Fields can be subject to extra alignment constraints, like if
    // the field is packed, the struct is packed, or the struct has a
    // max-field-alignment constraint (#pragma pack). So calculate
    // the actual alignment of the field within the struct, and then
    // (as we're expected to) constrain that by the alignment of the type.
    if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
      const RecordDecl *Parent = Field->getParent();
      // We can only produce a sensible answer if the record is valid.
      if (!Parent->isInvalidDecl()) {
        const ASTRecordLayout &Layout = getASTRecordLayout(Parent);

        // Start with the record's overall alignment.
        unsigned FieldAlign = toBits(Layout.getAlignment());

        // Use the GCD of that and the offset within the record.
        uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
        if (Offset > 0) {
          // Alignment is always a power of 2, so the GCD will be a power of 2,
          // which means we get to do this crazy thing instead of Euclid's.
          uint64_t LowBitOfOffset = Offset & (~Offset + 1);
          if (LowBitOfOffset < FieldAlign)
            FieldAlign = static_cast<unsigned>(LowBitOfOffset);
        }

        Align = std::min(Align, FieldAlign);
      }
    }
  }

  // Some targets have a hard limit on the maximum requestable alignment in
  // the aligned attribute for static variables.
  const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
  const auto *VD = dyn_cast<VarDecl>(D);
  if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
    Align = std::min(Align, MaxAlignedAttr);

  return toCharUnitsFromBits(Align);
}

CharUnits ASTContext::getExnObjectAlignment() const {
  return toCharUnitsFromBits(Target->getExnObjectAlignment());
}

// getTypeInfoDataSizeInChars - Return the size of a type, in
// chars. If the type is a record, its data size is returned.
This is 1739 // the size of the memcpy that's performed when assigning this type 1740 // using a trivial copy/move assignment operator. 1741 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1742 TypeInfoChars Info = getTypeInfoInChars(T); 1743 1744 // In C++, objects can sometimes be allocated into the tail padding 1745 // of a base-class subobject. We decide whether that's possible 1746 // during class layout, so here we can just trust the layout results. 1747 if (getLangOpts().CPlusPlus) { 1748 if (const auto *RT = T->getAs<RecordType>()) { 1749 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1750 Info.Width = layout.getDataSize(); 1751 } 1752 } 1753 1754 return Info; 1755 } 1756 1757 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1758 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1759 TypeInfoChars 1760 static getConstantArrayInfoInChars(const ASTContext &Context, 1761 const ConstantArrayType *CAT) { 1762 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1763 uint64_t Size = CAT->getSize().getZExtValue(); 1764 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1765 (uint64_t)(-1)/Size) && 1766 "Overflow in array type char size evaluation"); 1767 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1768 unsigned Align = EltInfo.Align.getQuantity(); 1769 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1770 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1771 Width = llvm::alignTo(Width, Align); 1772 return TypeInfoChars(CharUnits::fromQuantity(Width), 1773 CharUnits::fromQuantity(Align), 1774 EltInfo.AlignRequirement); 1775 } 1776 1777 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1778 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1779 return getConstantArrayInfoInChars(*this, CAT); 1780 TypeInfo Info = getTypeInfo(T); 1781 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1782 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1783 } 1784 1785 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1786 return getTypeInfoInChars(T.getTypePtr()); 1787 } 1788 1789 bool ASTContext::isPromotableIntegerType(QualType T) const { 1790 // HLSL doesn't promote all small integer types to int, it 1791 // just uses the rank-based promotion rules for all types. 1792 if (getLangOpts().HLSL) 1793 return false; 1794 1795 if (const auto *BT = T->getAs<BuiltinType>()) 1796 switch (BT->getKind()) { 1797 case BuiltinType::Bool: 1798 case BuiltinType::Char_S: 1799 case BuiltinType::Char_U: 1800 case BuiltinType::SChar: 1801 case BuiltinType::UChar: 1802 case BuiltinType::Short: 1803 case BuiltinType::UShort: 1804 case BuiltinType::WChar_S: 1805 case BuiltinType::WChar_U: 1806 case BuiltinType::Char8: 1807 case BuiltinType::Char16: 1808 case BuiltinType::Char32: 1809 return true; 1810 default: 1811 return false; 1812 } 1813 1814 // Enumerated types are promotable to their compatible integer types 1815 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). 
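  // E.g. an unscoped `enum E { A };` is promotable to int (or to its
  // compatible/underlying type), while a scoped `enum class F { B };` never
  // promotes and is rejected by the isScoped() check below.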
1816 if (const auto *ET = T->getAs<EnumType>()) { 1817 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() || 1818 ET->getDecl()->isScoped()) 1819 return false; 1820 1821 return true; 1822 } 1823 1824 return false; 1825 } 1826 1827 bool ASTContext::isAlignmentRequired(const Type *T) const { 1828 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1829 } 1830 1831 bool ASTContext::isAlignmentRequired(QualType T) const { 1832 return isAlignmentRequired(T.getTypePtr()); 1833 } 1834 1835 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1836 bool NeedsPreferredAlignment) const { 1837 // An alignment on a typedef overrides anything else. 1838 if (const auto *TT = T->getAs<TypedefType>()) 1839 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1840 return Align; 1841 1842 // If we have an (array of) complete type, we're done. 1843 T = getBaseElementType(T); 1844 if (!T->isIncompleteType()) 1845 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1846 1847 // If we had an array type, its element type might be a typedef 1848 // type with an alignment attribute. 1849 if (const auto *TT = T->getAs<TypedefType>()) 1850 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1851 return Align; 1852 1853 // Otherwise, see if the declaration of the type had an attribute. 1854 if (const auto *TT = T->getAs<TagType>()) 1855 return TT->getDecl()->getMaxAlignment(); 1856 1857 return 0; 1858 } 1859 1860 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1861 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1862 if (I != MemoizedTypeInfo.end()) 1863 return I->second; 1864 1865 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1866 TypeInfo TI = getTypeInfoImpl(T); 1867 MemoizedTypeInfo[T] = TI; 1868 return TI; 1869 } 1870 1871 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1872 /// method does not work on incomplete types. 1873 /// 1874 /// FIXME: Pointers into different addr spaces could have different sizes and 1875 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1876 /// should take a QualType, &c. 1877 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1878 uint64_t Width = 0; 1879 unsigned Align = 8; 1880 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1881 LangAS AS = LangAS::Default; 1882 switch (T->getTypeClass()) { 1883 #define TYPE(Class, Base) 1884 #define ABSTRACT_TYPE(Class, Base) 1885 #define NON_CANONICAL_TYPE(Class, Base) 1886 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1887 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1888 case Type::Class: \ 1889 assert(!T->isDependentType() && "should not see dependent types here"); \ 1890 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1891 #include "clang/AST/TypeNodes.inc" 1892 llvm_unreachable("Should not see dependent types"); 1893 1894 case Type::FunctionNoProto: 1895 case Type::FunctionProto: 1896 // GCC extension: alignof(function) = 32 bits 1897 Width = 0; 1898 Align = 32; 1899 break; 1900 1901 case Type::IncompleteArray: 1902 case Type::VariableArray: 1903 case Type::ConstantArray: { 1904 // Model non-constant sized arrays as size zero, but track the alignment. 
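    // E.g. (assuming a 32-bit, 32-bit-aligned int): `int[8]` gets Width
    // 8 * 32 = 256 bits and Align 32, while a VLA or incomplete `int[]`
    // keeps Width 0 and only reports Align 32.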
1905 uint64_t Size = 0; 1906 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1907 Size = CAT->getSize().getZExtValue(); 1908 1909 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1910 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1911 "Overflow in array type bit size evaluation"); 1912 Width = EltInfo.Width * Size; 1913 Align = EltInfo.Align; 1914 AlignRequirement = EltInfo.AlignRequirement; 1915 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1916 getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1917 Width = llvm::alignTo(Width, Align); 1918 break; 1919 } 1920 1921 case Type::ExtVector: 1922 case Type::Vector: { 1923 const auto *VT = cast<VectorType>(T); 1924 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1925 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 1926 : EltInfo.Width * VT->getNumElements(); 1927 // Enforce at least byte size and alignment. 1928 Width = std::max<unsigned>(8, Width); 1929 Align = std::max<unsigned>(8, Width); 1930 1931 // If the alignment is not a power of 2, round up to the next power of 2. 1932 // This happens for non-power-of-2 length vectors. 1933 if (Align & (Align-1)) { 1934 Align = llvm::bit_ceil(Align); 1935 Width = llvm::alignTo(Width, Align); 1936 } 1937 // Adjust the alignment based on the target max. 1938 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1939 if (TargetVectorAlign && TargetVectorAlign < Align) 1940 Align = TargetVectorAlign; 1941 if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 1942 // Adjust the alignment for fixed-length SVE vectors. This is important 1943 // for non-power-of-2 vector lengths. 1944 Align = 128; 1945 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 1946 // Adjust the alignment for fixed-length SVE predicates. 1947 Align = 16; 1948 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData) 1949 // Adjust the alignment for fixed-length RVV vectors. 1950 Align = std::min<unsigned>(64, Width); 1951 break; 1952 } 1953 1954 case Type::ConstantMatrix: { 1955 const auto *MT = cast<ConstantMatrixType>(T); 1956 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 1957 // The internal layout of a matrix value is implementation defined. 1958 // Initially be ABI compatible with arrays with respect to alignment and 1959 // size. 1960 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 1961 Align = ElementInfo.Align; 1962 break; 1963 } 1964 1965 case Type::Builtin: 1966 switch (cast<BuiltinType>(T)->getKind()) { 1967 default: llvm_unreachable("Unknown builtin type!"); 1968 case BuiltinType::Void: 1969 // GCC extension: alignof(void) = 8 bits. 
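      // (GCC likewise accepts __alignof__(void) as an extension and treats it
      // as byte-aligned, which the 8-bit value below mirrors.)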
1970 Width = 0; 1971 Align = 8; 1972 break; 1973 case BuiltinType::Bool: 1974 Width = Target->getBoolWidth(); 1975 Align = Target->getBoolAlign(); 1976 break; 1977 case BuiltinType::Char_S: 1978 case BuiltinType::Char_U: 1979 case BuiltinType::UChar: 1980 case BuiltinType::SChar: 1981 case BuiltinType::Char8: 1982 Width = Target->getCharWidth(); 1983 Align = Target->getCharAlign(); 1984 break; 1985 case BuiltinType::WChar_S: 1986 case BuiltinType::WChar_U: 1987 Width = Target->getWCharWidth(); 1988 Align = Target->getWCharAlign(); 1989 break; 1990 case BuiltinType::Char16: 1991 Width = Target->getChar16Width(); 1992 Align = Target->getChar16Align(); 1993 break; 1994 case BuiltinType::Char32: 1995 Width = Target->getChar32Width(); 1996 Align = Target->getChar32Align(); 1997 break; 1998 case BuiltinType::UShort: 1999 case BuiltinType::Short: 2000 Width = Target->getShortWidth(); 2001 Align = Target->getShortAlign(); 2002 break; 2003 case BuiltinType::UInt: 2004 case BuiltinType::Int: 2005 Width = Target->getIntWidth(); 2006 Align = Target->getIntAlign(); 2007 break; 2008 case BuiltinType::ULong: 2009 case BuiltinType::Long: 2010 Width = Target->getLongWidth(); 2011 Align = Target->getLongAlign(); 2012 break; 2013 case BuiltinType::ULongLong: 2014 case BuiltinType::LongLong: 2015 Width = Target->getLongLongWidth(); 2016 Align = Target->getLongLongAlign(); 2017 break; 2018 case BuiltinType::Int128: 2019 case BuiltinType::UInt128: 2020 Width = 128; 2021 Align = Target->getInt128Align(); 2022 break; 2023 case BuiltinType::ShortAccum: 2024 case BuiltinType::UShortAccum: 2025 case BuiltinType::SatShortAccum: 2026 case BuiltinType::SatUShortAccum: 2027 Width = Target->getShortAccumWidth(); 2028 Align = Target->getShortAccumAlign(); 2029 break; 2030 case BuiltinType::Accum: 2031 case BuiltinType::UAccum: 2032 case BuiltinType::SatAccum: 2033 case BuiltinType::SatUAccum: 2034 Width = Target->getAccumWidth(); 2035 Align = Target->getAccumAlign(); 2036 break; 2037 case BuiltinType::LongAccum: 2038 case BuiltinType::ULongAccum: 2039 case BuiltinType::SatLongAccum: 2040 case BuiltinType::SatULongAccum: 2041 Width = Target->getLongAccumWidth(); 2042 Align = Target->getLongAccumAlign(); 2043 break; 2044 case BuiltinType::ShortFract: 2045 case BuiltinType::UShortFract: 2046 case BuiltinType::SatShortFract: 2047 case BuiltinType::SatUShortFract: 2048 Width = Target->getShortFractWidth(); 2049 Align = Target->getShortFractAlign(); 2050 break; 2051 case BuiltinType::Fract: 2052 case BuiltinType::UFract: 2053 case BuiltinType::SatFract: 2054 case BuiltinType::SatUFract: 2055 Width = Target->getFractWidth(); 2056 Align = Target->getFractAlign(); 2057 break; 2058 case BuiltinType::LongFract: 2059 case BuiltinType::ULongFract: 2060 case BuiltinType::SatLongFract: 2061 case BuiltinType::SatULongFract: 2062 Width = Target->getLongFractWidth(); 2063 Align = Target->getLongFractAlign(); 2064 break; 2065 case BuiltinType::BFloat16: 2066 if (Target->hasBFloat16Type()) { 2067 Width = Target->getBFloat16Width(); 2068 Align = Target->getBFloat16Align(); 2069 } else if ((getLangOpts().SYCLIsDevice || 2070 (getLangOpts().OpenMP && 2071 getLangOpts().OpenMPIsTargetDevice)) && 2072 AuxTarget->hasBFloat16Type()) { 2073 Width = AuxTarget->getBFloat16Width(); 2074 Align = AuxTarget->getBFloat16Align(); 2075 } 2076 break; 2077 case BuiltinType::Float16: 2078 case BuiltinType::Half: 2079 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2080 !getLangOpts().OpenMPIsTargetDevice) { 2081 Width = Target->getHalfWidth(); 2082 
Align = Target->getHalfAlign(); 2083 } else { 2084 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2085 "Expected OpenMP device compilation."); 2086 Width = AuxTarget->getHalfWidth(); 2087 Align = AuxTarget->getHalfAlign(); 2088 } 2089 break; 2090 case BuiltinType::Float: 2091 Width = Target->getFloatWidth(); 2092 Align = Target->getFloatAlign(); 2093 break; 2094 case BuiltinType::Double: 2095 Width = Target->getDoubleWidth(); 2096 Align = Target->getDoubleAlign(); 2097 break; 2098 case BuiltinType::Ibm128: 2099 Width = Target->getIbm128Width(); 2100 Align = Target->getIbm128Align(); 2101 break; 2102 case BuiltinType::LongDouble: 2103 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2104 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2105 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2106 Width = AuxTarget->getLongDoubleWidth(); 2107 Align = AuxTarget->getLongDoubleAlign(); 2108 } else { 2109 Width = Target->getLongDoubleWidth(); 2110 Align = Target->getLongDoubleAlign(); 2111 } 2112 break; 2113 case BuiltinType::Float128: 2114 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2115 !getLangOpts().OpenMPIsTargetDevice) { 2116 Width = Target->getFloat128Width(); 2117 Align = Target->getFloat128Align(); 2118 } else { 2119 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2120 "Expected OpenMP device compilation."); 2121 Width = AuxTarget->getFloat128Width(); 2122 Align = AuxTarget->getFloat128Align(); 2123 } 2124 break; 2125 case BuiltinType::NullPtr: 2126 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*) 2127 Width = Target->getPointerWidth(LangAS::Default); 2128 Align = Target->getPointerAlign(LangAS::Default); 2129 break; 2130 case BuiltinType::ObjCId: 2131 case BuiltinType::ObjCClass: 2132 case BuiltinType::ObjCSel: 2133 Width = Target->getPointerWidth(LangAS::Default); 2134 Align = Target->getPointerAlign(LangAS::Default); 2135 break; 2136 case BuiltinType::OCLSampler: 2137 case BuiltinType::OCLEvent: 2138 case BuiltinType::OCLClkEvent: 2139 case BuiltinType::OCLQueue: 2140 case BuiltinType::OCLReserveID: 2141 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2142 case BuiltinType::Id: 2143 #include "clang/Basic/OpenCLImageTypes.def" 2144 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2145 case BuiltinType::Id: 2146 #include "clang/Basic/OpenCLExtensionTypes.def" 2147 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 2148 Width = Target->getPointerWidth(AS); 2149 Align = Target->getPointerAlign(AS); 2150 break; 2151 // The SVE types are effectively target-specific. The length of an 2152 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2153 // of 128 bits. There is one predicate bit for each vector byte, so the 2154 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2155 // 2156 // Because the length is only known at runtime, we use a dummy value 2157 // of 0 for the static length. The alignment values are those defined 2158 // by the Procedure Call Standard for the Arm Architecture. 
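    // E.g. svint32_t (an SVE_VECTOR_TYPE) is given Width 0 and Align 128
    // below, and svbool_t (an SVE_PREDICATE_TYPE) Width 0 and Align 16;
    // sizeof on these sizeless types is rejected elsewhere, so the dummy
    // width is never observed directly.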
2159 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2160 IsSigned, IsFP, IsBF) \ 2161 case BuiltinType::Id: \ 2162 Width = 0; \ 2163 Align = 128; \ 2164 break; 2165 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2166 case BuiltinType::Id: \ 2167 Width = 0; \ 2168 Align = 16; \ 2169 break; 2170 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \ 2171 case BuiltinType::Id: \ 2172 Width = 0; \ 2173 Align = 16; \ 2174 break; 2175 #include "clang/Basic/AArch64SVEACLETypes.def" 2176 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2177 case BuiltinType::Id: \ 2178 Width = Size; \ 2179 Align = Size; \ 2180 break; 2181 #include "clang/Basic/PPCTypes.def" 2182 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2183 IsFP, IsBF) \ 2184 case BuiltinType::Id: \ 2185 Width = 0; \ 2186 Align = ElBits; \ 2187 break; 2188 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2189 case BuiltinType::Id: \ 2190 Width = 0; \ 2191 Align = 8; \ 2192 break; 2193 #include "clang/Basic/RISCVVTypes.def" 2194 #define WASM_TYPE(Name, Id, SingletonId) \ 2195 case BuiltinType::Id: \ 2196 Width = 0; \ 2197 Align = 8; \ 2198 break; 2199 #include "clang/Basic/WebAssemblyReferenceTypes.def" 2200 } 2201 break; 2202 case Type::ObjCObjectPointer: 2203 Width = Target->getPointerWidth(LangAS::Default); 2204 Align = Target->getPointerAlign(LangAS::Default); 2205 break; 2206 case Type::BlockPointer: 2207 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace(); 2208 Width = Target->getPointerWidth(AS); 2209 Align = Target->getPointerAlign(AS); 2210 break; 2211 case Type::LValueReference: 2212 case Type::RValueReference: 2213 // alignof and sizeof should never enter this code path here, so we go 2214 // the pointer route. 2215 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace(); 2216 Width = Target->getPointerWidth(AS); 2217 Align = Target->getPointerAlign(AS); 2218 break; 2219 case Type::Pointer: 2220 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace(); 2221 Width = Target->getPointerWidth(AS); 2222 Align = Target->getPointerAlign(AS); 2223 break; 2224 case Type::MemberPointer: { 2225 const auto *MPT = cast<MemberPointerType>(T); 2226 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2227 Width = MPI.Width; 2228 Align = MPI.Align; 2229 break; 2230 } 2231 case Type::Complex: { 2232 // Complex types have the same alignment as their elements, but twice the 2233 // size. 
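    // E.g., assuming a 64-bit double with 64-bit alignment, _Complex double
    // gets Width 128 and Align 64.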
2234 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2235 Width = EltInfo.Width * 2; 2236 Align = EltInfo.Align; 2237 break; 2238 } 2239 case Type::ObjCObject: 2240 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2241 case Type::Adjusted: 2242 case Type::Decayed: 2243 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2244 case Type::ObjCInterface: { 2245 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2246 if (ObjCI->getDecl()->isInvalidDecl()) { 2247 Width = 8; 2248 Align = 8; 2249 break; 2250 } 2251 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2252 Width = toBits(Layout.getSize()); 2253 Align = toBits(Layout.getAlignment()); 2254 break; 2255 } 2256 case Type::BitInt: { 2257 const auto *EIT = cast<BitIntType>(T); 2258 Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()), 2259 getCharWidth(), Target->getLongLongAlign()); 2260 Width = llvm::alignTo(EIT->getNumBits(), Align); 2261 break; 2262 } 2263 case Type::Record: 2264 case Type::Enum: { 2265 const auto *TT = cast<TagType>(T); 2266 2267 if (TT->getDecl()->isInvalidDecl()) { 2268 Width = 8; 2269 Align = 8; 2270 break; 2271 } 2272 2273 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2274 const EnumDecl *ED = ET->getDecl(); 2275 TypeInfo Info = 2276 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2277 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2278 Info.Align = AttrAlign; 2279 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2280 } 2281 return Info; 2282 } 2283 2284 const auto *RT = cast<RecordType>(TT); 2285 const RecordDecl *RD = RT->getDecl(); 2286 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2287 Width = toBits(Layout.getSize()); 2288 Align = toBits(Layout.getAlignment()); 2289 AlignRequirement = RD->hasAttr<AlignedAttr>() 2290 ? AlignRequirementKind::RequiredByRecord 2291 : AlignRequirementKind::None; 2292 break; 2293 } 2294 2295 case Type::SubstTemplateTypeParm: 2296 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2297 getReplacementType().getTypePtr()); 2298 2299 case Type::Auto: 2300 case Type::DeducedTemplateSpecialization: { 2301 const auto *A = cast<DeducedType>(T); 2302 assert(!A->getDeducedType().isNull() && 2303 "cannot request the size of an undeduced or dependent auto type"); 2304 return getTypeInfo(A->getDeducedType().getTypePtr()); 2305 } 2306 2307 case Type::Paren: 2308 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2309 2310 case Type::MacroQualified: 2311 return getTypeInfo( 2312 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2313 2314 case Type::ObjCTypeParam: 2315 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2316 2317 case Type::Using: 2318 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2319 2320 case Type::Typedef: { 2321 const auto *TT = cast<TypedefType>(T); 2322 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr()); 2323 // If the typedef has an aligned attribute on it, it overrides any computed 2324 // alignment we have. This violates the GCC documentation (which says that 2325 // attribute(aligned) can only round up) but matches its implementation. 
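    // E.g. given `typedef double D __attribute__((aligned(2)));`, AttrAlign
    // below is 16 bits and replaces double's larger computed alignment.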
    if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
      Align = AttrAlign;
      AlignRequirement = AlignRequirementKind::RequiredByTypedef;
    } else {
      Align = Info.Align;
      AlignRequirement = Info.AlignRequirement;
    }
    Width = Info.Width;
    break;
  }

  case Type::Elaborated:
    return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());

  case Type::Attributed:
    return getTypeInfo(
        cast<AttributedType>(T)->getEquivalentType().getTypePtr());

  case Type::BTFTagAttributed:
    return getTypeInfo(
        cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());

  case Type::Atomic: {
    // Start with the base type information.
    TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
    Width = Info.Width;
    Align = Info.Align;

    if (!Width) {
      // An otherwise zero-sized type should still generate an
      // atomic operation.
      Width = Target->getCharWidth();
      assert(Align);
    } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
      // If the size of the type doesn't exceed the platform's max
      // atomic promotion width, make the size and alignment more
      // favorable to atomic operations:

      // Round the size up to a power of 2.
      Width = llvm::bit_ceil(Width);

      // Set the alignment equal to the size.
      Align = static_cast<unsigned>(Width);
    }
  }
  break;

  case Type::Pipe:
    Width = Target->getPointerWidth(LangAS::opencl_global);
    Align = Target->getPointerAlign(LangAS::opencl_global);
    break;
  }

  assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
  return TypeInfo(Width, Align, AlignRequirement);
}

unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
  UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
  if (I != MemoizedUnadjustedAlign.end())
    return I->second;

  unsigned UnadjustedAlign;
  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getASTRecordLayout(RD);
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
    const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
    UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
  } else {
    UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
  }

  MemoizedUnadjustedAlign[T] = UnadjustedAlign;
  return UnadjustedAlign;
}

unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
  unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
      getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
  return SimdAlign;
}

/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
  return CharUnits::fromQuantity(BitSize / getCharWidth());
}

/// toBits - Convert a size in characters to a size in bits.
int64_t ASTContext::toBits(CharUnits CharSize) const {
  return CharSize.getQuantity() * getCharWidth();
}

/// getTypeSizeInChars - Return the size of the specified type, in characters.
/// This method does not work on incomplete types.
CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
  return getTypeInfoInChars(T).Width;
}
CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
  return getTypeInfoInChars(T).Width;
}

/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
/// characters. This method does not work on incomplete types.
CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}
CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeAlign(T));
}

/// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a
/// type, in characters, before alignment adjustments. This method does
/// not work on incomplete types.
CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}
CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const {
  return toCharUnitsFromBits(getTypeUnadjustedAlign(T));
}

/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
/// type for the current target in bits. This can be different from the ABI
/// alignment in cases where overaligning a data type is beneficial for
/// performance or for preserving backwards compatibility. (Note: despite the
/// name, the preferred alignment is ABI-impacting, and not an optimization.)
unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
  TypeInfo TI = getTypeInfo(T);
  unsigned ABIAlign = TI.Align;

  T = T->getBaseElementTypeUnsafe();

  // The preferred alignment of member pointers is that of a pointer.
  if (T->isMemberPointerType())
    return getPreferredTypeAlign(getPointerDiffType().getTypePtr());

  if (!Target->allowsLargerPreferedTypeAlignment())
    return ABIAlign;

  if (const auto *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // When used as part of a typedef, or together with a 'packed' attribute,
    // the 'aligned' attribute can be used to decrease alignment. Note that the
    // 'packed' case is already taken into consideration when computing the
    // alignment; we only need to handle the typedef case here.
    if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef ||
        RD->isInvalidDecl())
      return ABIAlign;

    unsigned PreferredAlign = static_cast<unsigned>(
        toBits(getASTRecordLayout(RD).PreferredAlignment));
    assert(PreferredAlign >= ABIAlign &&
           "PreferredAlign should be at least as large as ABIAlign.");
    return PreferredAlign;
  }

  // Double (and, for targets supporting AIX `power` alignment, long double) and
  // long long should be naturally aligned (despite requiring less alignment) if
  // possible.
  if (const auto *CT = T->getAs<ComplexType>())
    T = CT->getElementType().getTypePtr();
  if (const auto *ET = T->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType().getTypePtr();
  if (T->isSpecificBuiltinType(BuiltinType::Double) ||
      T->isSpecificBuiltinType(BuiltinType::LongLong) ||
      T->isSpecificBuiltinType(BuiltinType::ULongLong) ||
      (T->isSpecificBuiltinType(BuiltinType::LongDouble) &&
       Target->defaultsToAIXPowerAlignment()))
    // Don't increase the alignment if an alignment attribute was specified on a
    // typedef declaration.
2498 if (!TI.isAlignRequired()) 2499 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2500 2501 return ABIAlign; 2502 } 2503 2504 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2505 /// for __attribute__((aligned)) on this target, to be used if no alignment 2506 /// value is specified. 2507 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2508 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2509 } 2510 2511 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2512 /// to a global variable of the specified type. 2513 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2514 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2515 return std::max(getPreferredTypeAlign(T), 2516 getTargetInfo().getMinGlobalAlign(TypeSize)); 2517 } 2518 2519 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2520 /// should be given to a global variable of the specified type. 2521 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2522 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2523 } 2524 2525 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2526 CharUnits Offset = CharUnits::Zero(); 2527 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2528 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2529 Offset += Layout->getBaseClassOffset(Base); 2530 Layout = &getASTRecordLayout(Base); 2531 } 2532 return Offset; 2533 } 2534 2535 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2536 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2537 CharUnits ThisAdjustment = CharUnits::Zero(); 2538 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2539 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2540 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2541 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2542 const CXXRecordDecl *Base = RD; 2543 const CXXRecordDecl *Derived = Path[I]; 2544 if (DerivedMember) 2545 std::swap(Base, Derived); 2546 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2547 RD = Path[I]; 2548 } 2549 if (DerivedMember) 2550 ThisAdjustment = -ThisAdjustment; 2551 return ThisAdjustment; 2552 } 2553 2554 /// DeepCollectObjCIvars - 2555 /// This routine first collects all declared, but not synthesized, ivars in 2556 /// super class and then collects all ivars, including those synthesized for 2557 /// current class. This routine is used for implementation of current class 2558 /// when all ivars, declared and synthesized are known. 2559 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2560 bool leafClass, 2561 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2562 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2563 DeepCollectObjCIvars(SuperClass, false, Ivars); 2564 if (!leafClass) { 2565 llvm::append_range(Ivars, OI->ivars()); 2566 } else { 2567 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2568 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2569 Iv= Iv->getNextIvar()) 2570 Ivars.push_back(Iv); 2571 } 2572 } 2573 2574 /// CollectInheritedProtocols - Collect all protocols in current class and 2575 /// those inherited by it. 
2576 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2577 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2578 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2579 // We can use protocol_iterator here instead of 2580 // all_referenced_protocol_iterator since we are walking all categories. 2581 for (auto *Proto : OI->all_referenced_protocols()) { 2582 CollectInheritedProtocols(Proto, Protocols); 2583 } 2584 2585 // Categories of this Interface. 2586 for (const auto *Cat : OI->visible_categories()) 2587 CollectInheritedProtocols(Cat, Protocols); 2588 2589 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2590 while (SD) { 2591 CollectInheritedProtocols(SD, Protocols); 2592 SD = SD->getSuperClass(); 2593 } 2594 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2595 for (auto *Proto : OC->protocols()) { 2596 CollectInheritedProtocols(Proto, Protocols); 2597 } 2598 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2599 // Insert the protocol. 2600 if (!Protocols.insert( 2601 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2602 return; 2603 2604 for (auto *Proto : OP->protocols()) 2605 CollectInheritedProtocols(Proto, Protocols); 2606 } 2607 } 2608 2609 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2610 const RecordDecl *RD, 2611 bool CheckIfTriviallyCopyable) { 2612 assert(RD->isUnion() && "Must be union type"); 2613 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2614 2615 for (const auto *Field : RD->fields()) { 2616 if (!Context.hasUniqueObjectRepresentations(Field->getType(), 2617 CheckIfTriviallyCopyable)) 2618 return false; 2619 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2620 if (FieldSize != UnionSize) 2621 return false; 2622 } 2623 return !RD->field_empty(); 2624 } 2625 2626 static int64_t getSubobjectOffset(const FieldDecl *Field, 2627 const ASTContext &Context, 2628 const clang::ASTRecordLayout & /*Layout*/) { 2629 return Context.getFieldOffset(Field); 2630 } 2631 2632 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2633 const ASTContext &Context, 2634 const clang::ASTRecordLayout &Layout) { 2635 return Context.toBits(Layout.getBaseClassOffset(RD)); 2636 } 2637 2638 static std::optional<int64_t> 2639 structHasUniqueObjectRepresentations(const ASTContext &Context, 2640 const RecordDecl *RD, 2641 bool CheckIfTriviallyCopyable); 2642 2643 static std::optional<int64_t> 2644 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, 2645 bool CheckIfTriviallyCopyable) { 2646 if (Field->getType()->isRecordType()) { 2647 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2648 if (!RD->isUnion()) 2649 return structHasUniqueObjectRepresentations(Context, RD, 2650 CheckIfTriviallyCopyable); 2651 } 2652 2653 // A _BitInt type may not be unique if it has padding bits 2654 // but if it is a bitfield the padding bits are not used. 2655 bool IsBitIntType = Field->getType()->isBitIntType(); 2656 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2657 !Context.hasUniqueObjectRepresentations(Field->getType(), 2658 CheckIfTriviallyCopyable)) 2659 return std::nullopt; 2660 2661 int64_t FieldSizeInBits = 2662 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2663 if (Field->isBitField()) { 2664 // If we have explicit padding bits, they don't contribute bits 2665 // to the actual object representation, so return 0. 
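    // E.g. in `struct S { int a : 3; int : 5; };` the unnamed 5-bit member is
    // pure padding and contributes 0, while `a` contributes exactly 3 bits.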
2666 if (Field->isUnnamedBitfield()) 2667 return 0; 2668 2669 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2670 if (IsBitIntType) { 2671 if ((unsigned)BitfieldSize > 2672 cast<BitIntType>(Field->getType())->getNumBits()) 2673 return std::nullopt; 2674 } else if (BitfieldSize > FieldSizeInBits) { 2675 return std::nullopt; 2676 } 2677 FieldSizeInBits = BitfieldSize; 2678 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations( 2679 Field->getType(), CheckIfTriviallyCopyable)) { 2680 return std::nullopt; 2681 } 2682 return FieldSizeInBits; 2683 } 2684 2685 static std::optional<int64_t> 2686 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context, 2687 bool CheckIfTriviallyCopyable) { 2688 return structHasUniqueObjectRepresentations(Context, RD, 2689 CheckIfTriviallyCopyable); 2690 } 2691 2692 template <typename RangeT> 2693 static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2694 const RangeT &Subobjects, int64_t CurOffsetInBits, 2695 const ASTContext &Context, const clang::ASTRecordLayout &Layout, 2696 bool CheckIfTriviallyCopyable) { 2697 for (const auto *Subobject : Subobjects) { 2698 std::optional<int64_t> SizeInBits = 2699 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable); 2700 if (!SizeInBits) 2701 return std::nullopt; 2702 if (*SizeInBits != 0) { 2703 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2704 if (Offset != CurOffsetInBits) 2705 return std::nullopt; 2706 CurOffsetInBits += *SizeInBits; 2707 } 2708 } 2709 return CurOffsetInBits; 2710 } 2711 2712 static std::optional<int64_t> 2713 structHasUniqueObjectRepresentations(const ASTContext &Context, 2714 const RecordDecl *RD, 2715 bool CheckIfTriviallyCopyable) { 2716 assert(!RD->isUnion() && "Must be struct/class type"); 2717 const auto &Layout = Context.getASTRecordLayout(RD); 2718 2719 int64_t CurOffsetInBits = 0; 2720 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2721 if (ClassDecl->isDynamicClass()) 2722 return std::nullopt; 2723 2724 SmallVector<CXXRecordDecl *, 4> Bases; 2725 for (const auto &Base : ClassDecl->bases()) { 2726 // Empty types can be inherited from, and non-empty types can potentially 2727 // have tail padding, so just make sure there isn't an error. 
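      // E.g. `struct Empty {}; struct D : Empty { int x; };` is fine: the
      // Empty base reports 0 bits and is skipped by the size check in
      // structSubobjectsHaveUniqueObjectRepresentations, so only `x` is
      // checked against CurOffsetInBits.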
2728 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2729 } 2730 2731 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2732 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2733 }); 2734 2735 std::optional<int64_t> OffsetAfterBases = 2736 structSubobjectsHaveUniqueObjectRepresentations( 2737 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable); 2738 if (!OffsetAfterBases) 2739 return std::nullopt; 2740 CurOffsetInBits = *OffsetAfterBases; 2741 } 2742 2743 std::optional<int64_t> OffsetAfterFields = 2744 structSubobjectsHaveUniqueObjectRepresentations( 2745 RD->fields(), CurOffsetInBits, Context, Layout, 2746 CheckIfTriviallyCopyable); 2747 if (!OffsetAfterFields) 2748 return std::nullopt; 2749 CurOffsetInBits = *OffsetAfterFields; 2750 2751 return CurOffsetInBits; 2752 } 2753 2754 bool ASTContext::hasUniqueObjectRepresentations( 2755 QualType Ty, bool CheckIfTriviallyCopyable) const { 2756 // C++17 [meta.unary.prop]: 2757 // The predicate condition for a template specialization 2758 // has_unique_object_representations<T> shall be satisfied if and only if: 2759 // (9.1) - T is trivially copyable, and 2760 // (9.2) - any two objects of type T with the same value have the same 2761 // object representation, where: 2762 // - two objects of array or non-union class type are considered to have 2763 // the same value if their respective sequences of direct subobjects 2764 // have the same values, and 2765 // - two objects of union type are considered to have the same value if 2766 // they have the same active member and the corresponding members have 2767 // the same value. 2768 // The set of scalar types for which this condition holds is 2769 // implementation-defined. [ Note: If a type has padding bits, the condition 2770 // does not hold; otherwise, the condition holds true for unsigned integral 2771 // types. -- end note ] 2772 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2773 2774 // Arrays are unique only if their element type is unique. 2775 if (Ty->isArrayType()) 2776 return hasUniqueObjectRepresentations(getBaseElementType(Ty), 2777 CheckIfTriviallyCopyable); 2778 2779 // (9.1) - T is trivially copyable... 2780 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this)) 2781 return false; 2782 2783 // All integrals and enums are unique. 2784 if (Ty->isIntegralOrEnumerationType()) { 2785 // Except _BitInt types that have padding bits. 2786 if (const auto *BIT = Ty->getAs<BitIntType>()) 2787 return getTypeSize(BIT) == BIT->getNumBits(); 2788 2789 return true; 2790 } 2791 2792 // All other pointers are unique. 
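  // (Object and function pointers are assumed to carry no padding bits in
  // their object representation; member pointers are instead checked for
  // ABI-specific padding just below.)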
2793 if (Ty->isPointerType()) 2794 return true; 2795 2796 if (const auto *MPT = Ty->getAs<MemberPointerType>()) 2797 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2798 2799 if (Ty->isRecordType()) { 2800 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2801 2802 if (Record->isInvalidDecl()) 2803 return false; 2804 2805 if (Record->isUnion()) 2806 return unionHasUniqueObjectRepresentations(*this, Record, 2807 CheckIfTriviallyCopyable); 2808 2809 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations( 2810 *this, Record, CheckIfTriviallyCopyable); 2811 2812 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2813 } 2814 2815 // FIXME: More cases to handle here (list by rsmith): 2816 // vectors (careful about, eg, vector of 3 foo) 2817 // _Complex int and friends 2818 // _Atomic T 2819 // Obj-C block pointers 2820 // Obj-C object pointers 2821 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2822 // clk_event_t, queue_t, reserve_id_t) 2823 // There're also Obj-C class types and the Obj-C selector type, but I think it 2824 // makes sense for those to return false here. 2825 2826 return false; 2827 } 2828 2829 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2830 unsigned count = 0; 2831 // Count ivars declared in class extension. 2832 for (const auto *Ext : OI->known_extensions()) 2833 count += Ext->ivar_size(); 2834 2835 // Count ivar defined in this class's implementation. This 2836 // includes synthesized ivars. 2837 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2838 count += ImplDecl->ivar_size(); 2839 2840 return count; 2841 } 2842 2843 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2844 if (!E) 2845 return false; 2846 2847 // nullptr_t is always treated as null. 2848 if (E->getType()->isNullPtrType()) return true; 2849 2850 if (E->getType()->isAnyPointerType() && 2851 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2852 Expr::NPC_ValueDependentIsNull)) 2853 return true; 2854 2855 // Unfortunately, __null has type 'int'. 2856 if (isa<GNUNullExpr>(E)) return true; 2857 2858 return false; 2859 } 2860 2861 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2862 /// exists. 2863 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2864 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2865 I = ObjCImpls.find(D); 2866 if (I != ObjCImpls.end()) 2867 return cast<ObjCImplementationDecl>(I->second); 2868 return nullptr; 2869 } 2870 2871 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2872 /// exists. 2873 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2874 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2875 I = ObjCImpls.find(D); 2876 if (I != ObjCImpls.end()) 2877 return cast<ObjCCategoryImplDecl>(I->second); 2878 return nullptr; 2879 } 2880 2881 /// Set the implementation of ObjCInterfaceDecl. 2882 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2883 ObjCImplementationDecl *ImplD) { 2884 assert(IFaceD && ImplD && "Passed null params"); 2885 ObjCImpls[IFaceD] = ImplD; 2886 } 2887 2888 /// Set the implementation of ObjCCategoryDecl. 
2889 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2890 ObjCCategoryImplDecl *ImplD) { 2891 assert(CatD && ImplD && "Passed null params"); 2892 ObjCImpls[CatD] = ImplD; 2893 } 2894 2895 const ObjCMethodDecl * 2896 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2897 return ObjCMethodRedecls.lookup(MD); 2898 } 2899 2900 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2901 const ObjCMethodDecl *Redecl) { 2902 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2903 ObjCMethodRedecls[MD] = Redecl; 2904 } 2905 2906 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2907 const NamedDecl *ND) const { 2908 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2909 return ID; 2910 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2911 return CD->getClassInterface(); 2912 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2913 return IMD->getClassInterface(); 2914 2915 return nullptr; 2916 } 2917 2918 /// Get the copy initialization expression of VarDecl, or nullptr if 2919 /// none exists. 2920 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2921 assert(VD && "Passed null params"); 2922 assert(VD->hasAttr<BlocksAttr>() && 2923 "getBlockVarCopyInits - not __block var"); 2924 auto I = BlockVarCopyInits.find(VD); 2925 if (I != BlockVarCopyInits.end()) 2926 return I->second; 2927 return {nullptr, false}; 2928 } 2929 2930 /// Set the copy initialization expression of a block var decl. 2931 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2932 bool CanThrow) { 2933 assert(VD && CopyExpr && "Passed null params"); 2934 assert(VD->hasAttr<BlocksAttr>() && 2935 "setBlockVarCopyInits - not __block var"); 2936 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2937 } 2938 2939 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2940 unsigned DataSize) const { 2941 if (!DataSize) 2942 DataSize = TypeLoc::getFullDataSizeForType(T); 2943 else 2944 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2945 "incorrect data size provided to CreateTypeSourceInfo!"); 2946 2947 auto *TInfo = 2948 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2949 new (TInfo) TypeSourceInfo(T, DataSize); 2950 return TInfo; 2951 } 2952 2953 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2954 SourceLocation L) const { 2955 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2956 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2957 return DI; 2958 } 2959 2960 const ASTRecordLayout & 2961 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2962 return getObjCLayout(D, nullptr); 2963 } 2964 2965 const ASTRecordLayout & 2966 ASTContext::getASTObjCImplementationLayout( 2967 const ObjCImplementationDecl *D) const { 2968 return getObjCLayout(D->getClassInterface(), D); 2969 } 2970 2971 static auto getCanonicalTemplateArguments(const ASTContext &C, 2972 ArrayRef<TemplateArgument> Args, 2973 bool &AnyNonCanonArgs) { 2974 SmallVector<TemplateArgument, 16> CanonArgs(Args); 2975 for (auto &Arg : CanonArgs) { 2976 TemplateArgument OrigArg = Arg; 2977 Arg = C.getCanonicalTemplateArgument(Arg); 2978 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); 2979 } 2980 return CanonArgs; 2981 } 2982 2983 //===----------------------------------------------------------------------===// 2984 // Type creation/memoization methods 2985 
//===----------------------------------------------------------------------===// 2986 2987 QualType 2988 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2989 unsigned fastQuals = quals.getFastQualifiers(); 2990 quals.removeFastQualifiers(); 2991 2992 // Check if we've already instantiated this type. 2993 llvm::FoldingSetNodeID ID; 2994 ExtQuals::Profile(ID, baseType, quals); 2995 void *insertPos = nullptr; 2996 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 2997 assert(eq->getQualifiers() == quals); 2998 return QualType(eq, fastQuals); 2999 } 3000 3001 // If the base type is not canonical, make the appropriate canonical type. 3002 QualType canon; 3003 if (!baseType->isCanonicalUnqualified()) { 3004 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 3005 canonSplit.Quals.addConsistentQualifiers(quals); 3006 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3007 3008 // Re-find the insert position. 3009 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3010 } 3011 3012 auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals); 3013 ExtQualNodes.InsertNode(eq, insertPos); 3014 return QualType(eq, fastQuals); 3015 } 3016 3017 QualType ASTContext::getAddrSpaceQualType(QualType T, 3018 LangAS AddressSpace) const { 3019 QualType CanT = getCanonicalType(T); 3020 if (CanT.getAddressSpace() == AddressSpace) 3021 return T; 3022 3023 // If we are composing extended qualifiers together, merge together 3024 // into one ExtQuals node. 3025 QualifierCollector Quals; 3026 const Type *TypeNode = Quals.strip(T); 3027 3028 // If this type already has an address space specified, it cannot get 3029 // another one. 3030 assert(!Quals.hasAddressSpace() && 3031 "Type cannot be in multiple addr spaces!"); 3032 Quals.addAddressSpace(AddressSpace); 3033 3034 return getExtQualType(TypeNode, Quals); 3035 } 3036 3037 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3038 // If the type is not qualified with an address space, just return it 3039 // immediately. 3040 if (!T.hasAddressSpace()) 3041 return T; 3042 3043 // If we are composing extended qualifiers together, merge together 3044 // into one ExtQuals node. 3045 QualifierCollector Quals; 3046 const Type *TypeNode; 3047 3048 while (T.hasAddressSpace()) { 3049 TypeNode = Quals.strip(T); 3050 3051 // If the type no longer has an address space after stripping qualifiers, 3052 // jump out. 3053 if (!QualType(TypeNode, 0).hasAddressSpace()) 3054 break; 3055 3056 // There might be sugar in the way. Strip it and try again. 3057 T = T.getSingleStepDesugaredType(*this); 3058 } 3059 3060 Quals.removeAddressSpace(); 3061 3062 // Removal of the address space can mean there are no longer any 3063 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3064 // or required. 
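  // (const, volatile and restrict are "fast" qualifiers stored directly in
  // the low bits of the QualType, so once no extended qualifier is left a
  // plain QualType carrying the fast bits is enough.)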
3065 if (Quals.hasNonFastQualifiers()) 3066 return getExtQualType(TypeNode, Quals); 3067 else 3068 return QualType(TypeNode, Quals.getFastQualifiers()); 3069 } 3070 3071 QualType ASTContext::getObjCGCQualType(QualType T, 3072 Qualifiers::GC GCAttr) const { 3073 QualType CanT = getCanonicalType(T); 3074 if (CanT.getObjCGCAttr() == GCAttr) 3075 return T; 3076 3077 if (const auto *ptr = T->getAs<PointerType>()) { 3078 QualType Pointee = ptr->getPointeeType(); 3079 if (Pointee->isAnyPointerType()) { 3080 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3081 return getPointerType(ResultType); 3082 } 3083 } 3084 3085 // If we are composing extended qualifiers together, merge together 3086 // into one ExtQuals node. 3087 QualifierCollector Quals; 3088 const Type *TypeNode = Quals.strip(T); 3089 3090 // If this type already has an ObjCGC specified, it cannot get 3091 // another one. 3092 assert(!Quals.hasObjCGCAttr() && 3093 "Type cannot have multiple ObjCGCs!"); 3094 Quals.addObjCGCAttr(GCAttr); 3095 3096 return getExtQualType(TypeNode, Quals); 3097 } 3098 3099 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3100 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3101 QualType Pointee = Ptr->getPointeeType(); 3102 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3103 return getPointerType(removeAddrSpaceQualType(Pointee)); 3104 } 3105 } 3106 return T; 3107 } 3108 3109 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3110 FunctionType::ExtInfo Info) { 3111 if (T->getExtInfo() == Info) 3112 return T; 3113 3114 QualType Result; 3115 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3116 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3117 } else { 3118 const auto *FPT = cast<FunctionProtoType>(T); 3119 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3120 EPI.ExtInfo = Info; 3121 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3122 } 3123 3124 return cast<FunctionType>(Result.getTypePtr()); 3125 } 3126 3127 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3128 QualType ResultType) { 3129 FD = FD->getMostRecentDecl(); 3130 while (true) { 3131 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3132 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3133 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3134 if (FunctionDecl *Next = FD->getPreviousDecl()) 3135 FD = Next; 3136 else 3137 break; 3138 } 3139 if (ASTMutationListener *L = getASTMutationListener()) 3140 L->DeducedReturnType(FD, ResultType); 3141 } 3142 3143 /// Get a function type and produce the equivalent function type with the 3144 /// specified exception specification. Type sugar that can be present on a 3145 /// declaration of a function with an exception specification is permitted 3146 /// and preserved. Other type sugar (for instance, typedefs) is not. 3147 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3148 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3149 // Might have some parens. 3150 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3151 return getParenType( 3152 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3153 3154 // Might be wrapped in a macro qualified type. 
3155 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3156 return getMacroQualifiedType( 3157 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3158 MQT->getMacroIdentifier()); 3159 3160 // Might have a calling-convention attribute. 3161 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3162 return getAttributedType( 3163 AT->getAttrKind(), 3164 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3165 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3166 3167 // Anything else must be a function type. Rebuild it with the new exception 3168 // specification. 3169 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3170 return getFunctionType( 3171 Proto->getReturnType(), Proto->getParamTypes(), 3172 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3173 } 3174 3175 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3176 QualType U) const { 3177 return hasSameType(T, U) || 3178 (getLangOpts().CPlusPlus17 && 3179 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3180 getFunctionTypeWithExceptionSpec(U, EST_None))); 3181 } 3182 3183 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3184 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3185 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3186 SmallVector<QualType, 16> Args(Proto->param_types().size()); 3187 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3188 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]); 3189 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3190 } 3191 3192 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3193 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3194 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3195 } 3196 3197 return T; 3198 } 3199 3200 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3201 return hasSameType(T, U) || 3202 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3203 getFunctionTypeWithoutPtrSizes(U)); 3204 } 3205 3206 void ASTContext::adjustExceptionSpec( 3207 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3208 bool AsWritten) { 3209 // Update the type. 3210 QualType Updated = 3211 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3212 FD->setType(Updated); 3213 3214 if (!AsWritten) 3215 return; 3216 3217 // Update the type in the type source information too. 3218 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3219 // If the type and the type-as-written differ, we may need to update 3220 // the type-as-written too. 3221 if (TSInfo->getType() != FD->getType()) 3222 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3223 3224 // FIXME: When we get proper type location information for exceptions, 3225 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3226 // up the TypeSourceInfo; 3227 assert(TypeLoc::getFullDataSizeForType(Updated) == 3228 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3229 "TypeLoc size mismatch from updating exception specification"); 3230 TSInfo->overrideType(Updated); 3231 } 3232 } 3233 3234 /// getComplexType - Return the uniqued reference to the type for a complex 3235 /// number with the specified element type. 3236 QualType ASTContext::getComplexType(QualType T) const { 3237 // Unique pointers, to guarantee there is only one pointer of a particular 3238 // structure. 
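  // For example (illustrative, assuming an ASTContext &Ctx): two calls to
  // Ctx.getComplexType(Ctx.DoubleTy) hand back QualTypes wrapping the same
  // uniqued ComplexType node for '_Complex double'.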
3239 llvm::FoldingSetNodeID ID; 3240 ComplexType::Profile(ID, T); 3241 3242 void *InsertPos = nullptr; 3243 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3244 return QualType(CT, 0); 3245 3246 // If the pointee type isn't canonical, this won't be a canonical type either, 3247 // so fill in the canonical type field. 3248 QualType Canonical; 3249 if (!T.isCanonical()) { 3250 Canonical = getComplexType(getCanonicalType(T)); 3251 3252 // Get the new insert position for the node we care about. 3253 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3254 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3255 } 3256 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical); 3257 Types.push_back(New); 3258 ComplexTypes.InsertNode(New, InsertPos); 3259 return QualType(New, 0); 3260 } 3261 3262 /// getPointerType - Return the uniqued reference to the type for a pointer to 3263 /// the specified type. 3264 QualType ASTContext::getPointerType(QualType T) const { 3265 // Unique pointers, to guarantee there is only one pointer of a particular 3266 // structure. 3267 llvm::FoldingSetNodeID ID; 3268 PointerType::Profile(ID, T); 3269 3270 void *InsertPos = nullptr; 3271 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3272 return QualType(PT, 0); 3273 3274 // If the pointee type isn't canonical, this won't be a canonical type either, 3275 // so fill in the canonical type field. 3276 QualType Canonical; 3277 if (!T.isCanonical()) { 3278 Canonical = getPointerType(getCanonicalType(T)); 3279 3280 // Get the new insert position for the node we care about. 3281 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3282 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3283 } 3284 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical); 3285 Types.push_back(New); 3286 PointerTypes.InsertNode(New, InsertPos); 3287 return QualType(New, 0); 3288 } 3289 3290 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3291 llvm::FoldingSetNodeID ID; 3292 AdjustedType::Profile(ID, Orig, New); 3293 void *InsertPos = nullptr; 3294 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3295 if (AT) 3296 return QualType(AT, 0); 3297 3298 QualType Canonical = getCanonicalType(New); 3299 3300 // Get the new insert position for the node we care about. 3301 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3302 assert(!AT && "Shouldn't be in the map!"); 3303 3304 AT = new (*this, alignof(AdjustedType)) 3305 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3306 Types.push_back(AT); 3307 AdjustedTypes.InsertNode(AT, InsertPos); 3308 return QualType(AT, 0); 3309 } 3310 3311 QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const { 3312 llvm::FoldingSetNodeID ID; 3313 AdjustedType::Profile(ID, Orig, Decayed); 3314 void *InsertPos = nullptr; 3315 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3316 if (AT) 3317 return QualType(AT, 0); 3318 3319 QualType Canonical = getCanonicalType(Decayed); 3320 3321 // Get the new insert position for the node we care about. 
3322 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3323 assert(!AT && "Shouldn't be in the map!"); 3324 3325 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical); 3326 Types.push_back(AT); 3327 AdjustedTypes.InsertNode(AT, InsertPos); 3328 return QualType(AT, 0); 3329 } 3330 3331 QualType ASTContext::getDecayedType(QualType T) const { 3332 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3333 3334 QualType Decayed; 3335 3336 // C99 6.7.5.3p7: 3337 // A declaration of a parameter as "array of type" shall be 3338 // adjusted to "qualified pointer to type", where the type 3339 // qualifiers (if any) are those specified within the [ and ] of 3340 // the array type derivation. 3341 if (T->isArrayType()) 3342 Decayed = getArrayDecayedType(T); 3343 3344 // C99 6.7.5.3p8: 3345 // A declaration of a parameter as "function returning type" 3346 // shall be adjusted to "pointer to function returning type", as 3347 // in 6.3.2.1. 3348 if (T->isFunctionType()) 3349 Decayed = getPointerType(T); 3350 3351 return getDecayedType(T, Decayed); 3352 } 3353 3354 /// getBlockPointerType - Return the uniqued reference to the type for 3355 /// a pointer to the specified block. 3356 QualType ASTContext::getBlockPointerType(QualType T) const { 3357 assert(T->isFunctionType() && "block of function types only"); 3358 // Unique pointers, to guarantee there is only one block of a particular 3359 // structure. 3360 llvm::FoldingSetNodeID ID; 3361 BlockPointerType::Profile(ID, T); 3362 3363 void *InsertPos = nullptr; 3364 if (BlockPointerType *PT = 3365 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3366 return QualType(PT, 0); 3367 3368 // If the block pointee type isn't canonical, this won't be a canonical 3369 // type either so fill in the canonical type field. 3370 QualType Canonical; 3371 if (!T.isCanonical()) { 3372 Canonical = getBlockPointerType(getCanonicalType(T)); 3373 3374 // Get the new insert position for the node we care about. 3375 BlockPointerType *NewIP = 3376 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3377 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3378 } 3379 auto *New = 3380 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical); 3381 Types.push_back(New); 3382 BlockPointerTypes.InsertNode(New, InsertPos); 3383 return QualType(New, 0); 3384 } 3385 3386 /// getLValueReferenceType - Return the uniqued reference to the type for an 3387 /// lvalue reference to the specified type. 3388 QualType 3389 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3390 assert((!T->isPlaceholderType() || 3391 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3392 "Unresolved placeholder type"); 3393 3394 // Unique pointers, to guarantee there is only one pointer of a particular 3395 // structure. 3396 llvm::FoldingSetNodeID ID; 3397 ReferenceType::Profile(ID, T, SpelledAsLValue); 3398 3399 void *InsertPos = nullptr; 3400 if (LValueReferenceType *RT = 3401 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3402 return QualType(RT, 0); 3403 3404 const auto *InnerRef = T->getAs<ReferenceType>(); 3405 3406 // If the referencee type isn't canonical, this won't be a canonical type 3407 // either, so fill in the canonical type field. 3408 QualType Canonical; 3409 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3410 QualType PointeeType = (InnerRef ? 
InnerRef->getPointeeType() : T); 3411 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3412 3413 // Get the new insert position for the node we care about. 3414 LValueReferenceType *NewIP = 3415 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3416 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3417 } 3418 3419 auto *New = new (*this, alignof(LValueReferenceType)) 3420 LValueReferenceType(T, Canonical, SpelledAsLValue); 3421 Types.push_back(New); 3422 LValueReferenceTypes.InsertNode(New, InsertPos); 3423 3424 return QualType(New, 0); 3425 } 3426 3427 /// getRValueReferenceType - Return the uniqued reference to the type for an 3428 /// rvalue reference to the specified type. 3429 QualType ASTContext::getRValueReferenceType(QualType T) const { 3430 assert((!T->isPlaceholderType() || 3431 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3432 "Unresolved placeholder type"); 3433 3434 // Unique pointers, to guarantee there is only one pointer of a particular 3435 // structure. 3436 llvm::FoldingSetNodeID ID; 3437 ReferenceType::Profile(ID, T, false); 3438 3439 void *InsertPos = nullptr; 3440 if (RValueReferenceType *RT = 3441 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3442 return QualType(RT, 0); 3443 3444 const auto *InnerRef = T->getAs<ReferenceType>(); 3445 3446 // If the referencee type isn't canonical, this won't be a canonical type 3447 // either, so fill in the canonical type field. 3448 QualType Canonical; 3449 if (InnerRef || !T.isCanonical()) { 3450 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3451 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3452 3453 // Get the new insert position for the node we care about. 3454 RValueReferenceType *NewIP = 3455 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3456 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3457 } 3458 3459 auto *New = new (*this, alignof(RValueReferenceType)) 3460 RValueReferenceType(T, Canonical); 3461 Types.push_back(New); 3462 RValueReferenceTypes.InsertNode(New, InsertPos); 3463 return QualType(New, 0); 3464 } 3465 3466 /// getMemberPointerType - Return the uniqued reference to the type for a 3467 /// member pointer to the specified type, in the specified class. 3468 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3469 // Unique pointers, to guarantee there is only one pointer of a particular 3470 // structure. 3471 llvm::FoldingSetNodeID ID; 3472 MemberPointerType::Profile(ID, T, Cls); 3473 3474 void *InsertPos = nullptr; 3475 if (MemberPointerType *PT = 3476 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3477 return QualType(PT, 0); 3478 3479 // If the pointee or class type isn't canonical, this won't be a canonical 3480 // type either, so fill in the canonical type field. 3481 QualType Canonical; 3482 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3483 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3484 3485 // Get the new insert position for the node we care about. 
3486 MemberPointerType *NewIP = 3487 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3488 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3489 } 3490 auto *New = new (*this, alignof(MemberPointerType)) 3491 MemberPointerType(T, Cls, Canonical); 3492 Types.push_back(New); 3493 MemberPointerTypes.InsertNode(New, InsertPos); 3494 return QualType(New, 0); 3495 } 3496 3497 /// getConstantArrayType - Return the unique reference to the type for an 3498 /// array of the specified element type. 3499 QualType ASTContext::getConstantArrayType(QualType EltTy, 3500 const llvm::APInt &ArySizeIn, 3501 const Expr *SizeExpr, 3502 ArraySizeModifier ASM, 3503 unsigned IndexTypeQuals) const { 3504 assert((EltTy->isDependentType() || 3505 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3506 "Constant array of VLAs is illegal!"); 3507 3508 // We only need the size as part of the type if it's instantiation-dependent. 3509 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3510 SizeExpr = nullptr; 3511 3512 // Convert the array size into a canonical width matching the pointer size for 3513 // the target. 3514 llvm::APInt ArySize(ArySizeIn); 3515 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3516 3517 llvm::FoldingSetNodeID ID; 3518 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3519 IndexTypeQuals); 3520 3521 void *InsertPos = nullptr; 3522 if (ConstantArrayType *ATP = 3523 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3524 return QualType(ATP, 0); 3525 3526 // If the element type isn't canonical or has qualifiers, or the array bound 3527 // is instantiation-dependent, this won't be a canonical type either, so fill 3528 // in the canonical type field. 3529 QualType Canon; 3530 // FIXME: Check below should look for qualifiers behind sugar. 3531 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3532 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3533 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3534 ASM, IndexTypeQuals); 3535 Canon = getQualifiedType(Canon, canonSplit.Quals); 3536 3537 // Get the new insert position for the node we care about. 3538 ConstantArrayType *NewIP = 3539 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3540 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3541 } 3542 3543 void *Mem = Allocate( 3544 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3545 alignof(ConstantArrayType)); 3546 auto *New = new (Mem) 3547 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3548 ConstantArrayTypes.InsertNode(New, InsertPos); 3549 Types.push_back(New); 3550 return QualType(New, 0); 3551 } 3552 3553 /// getVariableArrayDecayedType - Turns the given type, which may be 3554 /// variably-modified, into the corresponding type with all the known 3555 /// sizes replaced with [*]. 3556 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3557 // Vastly most common case. 3558 if (!type->isVariablyModifiedType()) return type; 3559 3560 QualType result; 3561 3562 SplitQualType split = type.getSplitDesugaredType(); 3563 const Type *ty = split.Ty; 3564 switch (ty->getTypeClass()) { 3565 #define TYPE(Class, Base) 3566 #define ABSTRACT_TYPE(Class, Base) 3567 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3568 #include "clang/AST/TypeNodes.inc" 3569 llvm_unreachable("didn't desugar past all non-canonical types?"); 3570 3571 // These types should never be variably-modified. 
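  // (For instance, a builtin or enum type has nothing variably-modified
  // inside it, so hitting one of these cases here would be a caller bug.)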
3572 case Type::Builtin: 3573 case Type::Complex: 3574 case Type::Vector: 3575 case Type::DependentVector: 3576 case Type::ExtVector: 3577 case Type::DependentSizedExtVector: 3578 case Type::ConstantMatrix: 3579 case Type::DependentSizedMatrix: 3580 case Type::DependentAddressSpace: 3581 case Type::ObjCObject: 3582 case Type::ObjCInterface: 3583 case Type::ObjCObjectPointer: 3584 case Type::Record: 3585 case Type::Enum: 3586 case Type::UnresolvedUsing: 3587 case Type::TypeOfExpr: 3588 case Type::TypeOf: 3589 case Type::Decltype: 3590 case Type::UnaryTransform: 3591 case Type::DependentName: 3592 case Type::InjectedClassName: 3593 case Type::TemplateSpecialization: 3594 case Type::DependentTemplateSpecialization: 3595 case Type::TemplateTypeParm: 3596 case Type::SubstTemplateTypeParmPack: 3597 case Type::Auto: 3598 case Type::DeducedTemplateSpecialization: 3599 case Type::PackExpansion: 3600 case Type::BitInt: 3601 case Type::DependentBitInt: 3602 llvm_unreachable("type should never be variably-modified"); 3603 3604 // These types can be variably-modified but should never need to 3605 // further decay. 3606 case Type::FunctionNoProto: 3607 case Type::FunctionProto: 3608 case Type::BlockPointer: 3609 case Type::MemberPointer: 3610 case Type::Pipe: 3611 return type; 3612 3613 // These types can be variably-modified. All these modifications 3614 // preserve structure except as noted by comments. 3615 // TODO: if we ever care about optimizing VLAs, there are no-op 3616 // optimizations available here. 3617 case Type::Pointer: 3618 result = getPointerType(getVariableArrayDecayedType( 3619 cast<PointerType>(ty)->getPointeeType())); 3620 break; 3621 3622 case Type::LValueReference: { 3623 const auto *lv = cast<LValueReferenceType>(ty); 3624 result = getLValueReferenceType( 3625 getVariableArrayDecayedType(lv->getPointeeType()), 3626 lv->isSpelledAsLValue()); 3627 break; 3628 } 3629 3630 case Type::RValueReference: { 3631 const auto *lv = cast<RValueReferenceType>(ty); 3632 result = getRValueReferenceType( 3633 getVariableArrayDecayedType(lv->getPointeeType())); 3634 break; 3635 } 3636 3637 case Type::Atomic: { 3638 const auto *at = cast<AtomicType>(ty); 3639 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3640 break; 3641 } 3642 3643 case Type::ConstantArray: { 3644 const auto *cat = cast<ConstantArrayType>(ty); 3645 result = getConstantArrayType( 3646 getVariableArrayDecayedType(cat->getElementType()), 3647 cat->getSize(), 3648 cat->getSizeExpr(), 3649 cat->getSizeModifier(), 3650 cat->getIndexTypeCVRQualifiers()); 3651 break; 3652 } 3653 3654 case Type::DependentSizedArray: { 3655 const auto *dat = cast<DependentSizedArrayType>(ty); 3656 result = getDependentSizedArrayType( 3657 getVariableArrayDecayedType(dat->getElementType()), 3658 dat->getSizeExpr(), 3659 dat->getSizeModifier(), 3660 dat->getIndexTypeCVRQualifiers(), 3661 dat->getBracketsRange()); 3662 break; 3663 } 3664 3665 // Turn incomplete types into [*] types. 3666 case Type::IncompleteArray: { 3667 const auto *iat = cast<IncompleteArrayType>(ty); 3668 result = 3669 getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()), 3670 /*size*/ nullptr, ArraySizeModifier::Normal, 3671 iat->getIndexTypeCVRQualifiers(), SourceRange()); 3672 break; 3673 } 3674 3675 // Turn VLA types into [*] types. 
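  // (For instance, the written type 'int[n]' becomes 'int[*]', keeping the
  // index qualifiers and brackets range of the original.)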
3676 case Type::VariableArray: { 3677 const auto *vat = cast<VariableArrayType>(ty); 3678 result = getVariableArrayType( 3679 getVariableArrayDecayedType(vat->getElementType()), 3680 /*size*/ nullptr, ArraySizeModifier::Star, 3681 vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange()); 3682 break; 3683 } 3684 } 3685 3686 // Apply the top-level qualifiers from the original. 3687 return getQualifiedType(result, split.Quals); 3688 } 3689 3690 /// getVariableArrayType - Returns a non-unique reference to the type for a 3691 /// variable array of the specified element type. 3692 QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts, 3693 ArraySizeModifier ASM, 3694 unsigned IndexTypeQuals, 3695 SourceRange Brackets) const { 3696 // Since we don't unique expressions, it isn't possible to unique VLA's 3697 // that have an expression provided for their size. 3698 QualType Canon; 3699 3700 // Be sure to pull qualifiers off the element type. 3701 // FIXME: Check below should look for qualifiers behind sugar. 3702 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3703 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3704 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3705 IndexTypeQuals, Brackets); 3706 Canon = getQualifiedType(Canon, canonSplit.Quals); 3707 } 3708 3709 auto *New = new (*this, alignof(VariableArrayType)) 3710 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3711 3712 VariableArrayTypes.push_back(New); 3713 Types.push_back(New); 3714 return QualType(New, 0); 3715 } 3716 3717 /// getDependentSizedArrayType - Returns a non-unique reference to 3718 /// the type for a dependently-sized array of the specified element 3719 /// type. 3720 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3721 Expr *numElements, 3722 ArraySizeModifier ASM, 3723 unsigned elementTypeQuals, 3724 SourceRange brackets) const { 3725 assert((!numElements || numElements->isTypeDependent() || 3726 numElements->isValueDependent()) && 3727 "Size must be type- or value-dependent!"); 3728 3729 // Dependently-sized array types that do not have a specified number 3730 // of elements will have their sizes deduced from a dependent 3731 // initializer. We do no canonicalization here at all, which is okay 3732 // because they can't be used in most locations. 3733 if (!numElements) { 3734 auto *newType = new (*this, alignof(DependentSizedArrayType)) 3735 DependentSizedArrayType(elementType, QualType(), numElements, ASM, 3736 elementTypeQuals, brackets); 3737 Types.push_back(newType); 3738 return QualType(newType, 0); 3739 } 3740 3741 // Otherwise, we actually build a new type every time, but we 3742 // also build a canonical type. 3743 3744 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3745 3746 void *insertPos = nullptr; 3747 llvm::FoldingSetNodeID ID; 3748 DependentSizedArrayType::Profile(ID, *this, 3749 QualType(canonElementType.Ty, 0), 3750 ASM, elementTypeQuals, numElements); 3751 3752 // Look for an existing type with these properties. 3753 DependentSizedArrayType *canonTy = 3754 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3755 3756 // If we don't have one, build one. 
3757 if (!canonTy) { 3758 canonTy = new (*this, alignof(DependentSizedArrayType)) 3759 DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(), 3760 numElements, ASM, elementTypeQuals, brackets); 3761 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3762 Types.push_back(canonTy); 3763 } 3764 3765 // Apply qualifiers from the element type to the array. 3766 QualType canon = getQualifiedType(QualType(canonTy,0), 3767 canonElementType.Quals); 3768 3769 // If we didn't need extra canonicalization for the element type or the size 3770 // expression, then just use that as our result. 3771 if (QualType(canonElementType.Ty, 0) == elementType && 3772 canonTy->getSizeExpr() == numElements) 3773 return canon; 3774 3775 // Otherwise, we need to build a type which follows the spelling 3776 // of the element type. 3777 auto *sugaredType = new (*this, alignof(DependentSizedArrayType)) 3778 DependentSizedArrayType(elementType, canon, numElements, ASM, 3779 elementTypeQuals, brackets); 3780 Types.push_back(sugaredType); 3781 return QualType(sugaredType, 0); 3782 } 3783 3784 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3785 ArraySizeModifier ASM, 3786 unsigned elementTypeQuals) const { 3787 llvm::FoldingSetNodeID ID; 3788 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3789 3790 void *insertPos = nullptr; 3791 if (IncompleteArrayType *iat = 3792 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3793 return QualType(iat, 0); 3794 3795 // If the element type isn't canonical, this won't be a canonical type 3796 // either, so fill in the canonical type field. We also have to pull 3797 // qualifiers off the element type. 3798 QualType canon; 3799 3800 // FIXME: Check below should look for qualifiers behind sugar. 3801 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3802 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3803 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3804 ASM, elementTypeQuals); 3805 canon = getQualifiedType(canon, canonSplit.Quals); 3806 3807 // Get the new insert position for the node we care about. 
3808 IncompleteArrayType *existing = 3809 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3810 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3811 } 3812 3813 auto *newType = new (*this, alignof(IncompleteArrayType)) 3814 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3815 3816 IncompleteArrayTypes.InsertNode(newType, insertPos); 3817 Types.push_back(newType); 3818 return QualType(newType, 0); 3819 } 3820 3821 ASTContext::BuiltinVectorTypeInfo 3822 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3823 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3824 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3825 NUMVECTORS}; 3826 3827 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3828 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3829 3830 switch (Ty->getKind()) { 3831 default: 3832 llvm_unreachable("Unsupported builtin vector type"); 3833 case BuiltinType::SveInt8: 3834 return SVE_INT_ELTTY(8, 16, true, 1); 3835 case BuiltinType::SveUint8: 3836 return SVE_INT_ELTTY(8, 16, false, 1); 3837 case BuiltinType::SveInt8x2: 3838 return SVE_INT_ELTTY(8, 16, true, 2); 3839 case BuiltinType::SveUint8x2: 3840 return SVE_INT_ELTTY(8, 16, false, 2); 3841 case BuiltinType::SveInt8x3: 3842 return SVE_INT_ELTTY(8, 16, true, 3); 3843 case BuiltinType::SveUint8x3: 3844 return SVE_INT_ELTTY(8, 16, false, 3); 3845 case BuiltinType::SveInt8x4: 3846 return SVE_INT_ELTTY(8, 16, true, 4); 3847 case BuiltinType::SveUint8x4: 3848 return SVE_INT_ELTTY(8, 16, false, 4); 3849 case BuiltinType::SveInt16: 3850 return SVE_INT_ELTTY(16, 8, true, 1); 3851 case BuiltinType::SveUint16: 3852 return SVE_INT_ELTTY(16, 8, false, 1); 3853 case BuiltinType::SveInt16x2: 3854 return SVE_INT_ELTTY(16, 8, true, 2); 3855 case BuiltinType::SveUint16x2: 3856 return SVE_INT_ELTTY(16, 8, false, 2); 3857 case BuiltinType::SveInt16x3: 3858 return SVE_INT_ELTTY(16, 8, true, 3); 3859 case BuiltinType::SveUint16x3: 3860 return SVE_INT_ELTTY(16, 8, false, 3); 3861 case BuiltinType::SveInt16x4: 3862 return SVE_INT_ELTTY(16, 8, true, 4); 3863 case BuiltinType::SveUint16x4: 3864 return SVE_INT_ELTTY(16, 8, false, 4); 3865 case BuiltinType::SveInt32: 3866 return SVE_INT_ELTTY(32, 4, true, 1); 3867 case BuiltinType::SveUint32: 3868 return SVE_INT_ELTTY(32, 4, false, 1); 3869 case BuiltinType::SveInt32x2: 3870 return SVE_INT_ELTTY(32, 4, true, 2); 3871 case BuiltinType::SveUint32x2: 3872 return SVE_INT_ELTTY(32, 4, false, 2); 3873 case BuiltinType::SveInt32x3: 3874 return SVE_INT_ELTTY(32, 4, true, 3); 3875 case BuiltinType::SveUint32x3: 3876 return SVE_INT_ELTTY(32, 4, false, 3); 3877 case BuiltinType::SveInt32x4: 3878 return SVE_INT_ELTTY(32, 4, true, 4); 3879 case BuiltinType::SveUint32x4: 3880 return SVE_INT_ELTTY(32, 4, false, 4); 3881 case BuiltinType::SveInt64: 3882 return SVE_INT_ELTTY(64, 2, true, 1); 3883 case BuiltinType::SveUint64: 3884 return SVE_INT_ELTTY(64, 2, false, 1); 3885 case BuiltinType::SveInt64x2: 3886 return SVE_INT_ELTTY(64, 2, true, 2); 3887 case BuiltinType::SveUint64x2: 3888 return SVE_INT_ELTTY(64, 2, false, 2); 3889 case BuiltinType::SveInt64x3: 3890 return SVE_INT_ELTTY(64, 2, true, 3); 3891 case BuiltinType::SveUint64x3: 3892 return SVE_INT_ELTTY(64, 2, false, 3); 3893 case BuiltinType::SveInt64x4: 3894 return SVE_INT_ELTTY(64, 2, true, 4); 3895 case BuiltinType::SveUint64x4: 3896 return SVE_INT_ELTTY(64, 2, false, 4); 3897 case BuiltinType::SveBool: 3898 return SVE_ELTTY(BoolTy, 16, 1); 3899 case 
BuiltinType::SveBoolx2: 3900 return SVE_ELTTY(BoolTy, 16, 2); 3901 case BuiltinType::SveBoolx4: 3902 return SVE_ELTTY(BoolTy, 16, 4); 3903 case BuiltinType::SveFloat16: 3904 return SVE_ELTTY(HalfTy, 8, 1); 3905 case BuiltinType::SveFloat16x2: 3906 return SVE_ELTTY(HalfTy, 8, 2); 3907 case BuiltinType::SveFloat16x3: 3908 return SVE_ELTTY(HalfTy, 8, 3); 3909 case BuiltinType::SveFloat16x4: 3910 return SVE_ELTTY(HalfTy, 8, 4); 3911 case BuiltinType::SveFloat32: 3912 return SVE_ELTTY(FloatTy, 4, 1); 3913 case BuiltinType::SveFloat32x2: 3914 return SVE_ELTTY(FloatTy, 4, 2); 3915 case BuiltinType::SveFloat32x3: 3916 return SVE_ELTTY(FloatTy, 4, 3); 3917 case BuiltinType::SveFloat32x4: 3918 return SVE_ELTTY(FloatTy, 4, 4); 3919 case BuiltinType::SveFloat64: 3920 return SVE_ELTTY(DoubleTy, 2, 1); 3921 case BuiltinType::SveFloat64x2: 3922 return SVE_ELTTY(DoubleTy, 2, 2); 3923 case BuiltinType::SveFloat64x3: 3924 return SVE_ELTTY(DoubleTy, 2, 3); 3925 case BuiltinType::SveFloat64x4: 3926 return SVE_ELTTY(DoubleTy, 2, 4); 3927 case BuiltinType::SveBFloat16: 3928 return SVE_ELTTY(BFloat16Ty, 8, 1); 3929 case BuiltinType::SveBFloat16x2: 3930 return SVE_ELTTY(BFloat16Ty, 8, 2); 3931 case BuiltinType::SveBFloat16x3: 3932 return SVE_ELTTY(BFloat16Ty, 8, 3); 3933 case BuiltinType::SveBFloat16x4: 3934 return SVE_ELTTY(BFloat16Ty, 8, 4); 3935 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3936 IsSigned) \ 3937 case BuiltinType::Id: \ 3938 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3939 llvm::ElementCount::getScalable(NumEls), NF}; 3940 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3941 case BuiltinType::Id: \ 3942 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3943 llvm::ElementCount::getScalable(NumEls), NF}; 3944 #define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3945 case BuiltinType::Id: \ 3946 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF}; 3947 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3948 case BuiltinType::Id: \ 3949 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3950 #include "clang/Basic/RISCVVTypes.def" 3951 } 3952 } 3953 3954 /// getExternrefType - Return a WebAssembly externref type, which represents an 3955 /// opaque reference to a host value. 3956 QualType ASTContext::getWebAssemblyExternrefType() const { 3957 if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) { 3958 #define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \ 3959 if (BuiltinType::Id == BuiltinType::WasmExternRef) \ 3960 return SingletonId; 3961 #include "clang/Basic/WebAssemblyReferenceTypes.def" 3962 } 3963 llvm_unreachable( 3964 "shouldn't try to generate type externref outside WebAssembly target"); 3965 } 3966 3967 /// getScalableVectorType - Return the unique reference to a scalable vector 3968 /// type of the specified element type and size. VectorType must be a built-in 3969 /// type. 
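/// For example (an illustrative sketch, assuming an ASTContext &Ctx for an
/// AArch64 target with the SVE types available):
///
///   QualType SveI32 = Ctx.getScalableVectorType(Ctx.IntTy, /*NumElts=*/4,
///                                               /*NumFields=*/1);
///   // SveI32 is the __SVInt32_t singleton; a combination that matches no
///   // builtin scalable type yields a null QualType instead.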
3970 QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, 3971 unsigned NumFields) const { 3972 if (Target->hasAArch64SVETypes()) { 3973 uint64_t EltTySize = getTypeSize(EltTy); 3974 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3975 IsSigned, IsFP, IsBF) \ 3976 if (!EltTy->isBooleanType() && \ 3977 ((EltTy->hasIntegerRepresentation() && \ 3978 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3979 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3980 IsFP && !IsBF) || \ 3981 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3982 IsBF && !IsFP)) && \ 3983 EltTySize == ElBits && NumElts == NumEls) { \ 3984 return SingletonId; \ 3985 } 3986 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3987 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3988 return SingletonId; 3989 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId) 3990 #include "clang/Basic/AArch64SVEACLETypes.def" 3991 } else if (Target->hasRISCVVTypes()) { 3992 uint64_t EltTySize = getTypeSize(EltTy); 3993 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3994 IsFP, IsBF) \ 3995 if (!EltTy->isBooleanType() && \ 3996 ((EltTy->hasIntegerRepresentation() && \ 3997 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3998 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3999 IsFP && !IsBF) || \ 4000 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 4001 IsBF && !IsFP)) && \ 4002 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \ 4003 return SingletonId; 4004 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 4005 if (EltTy->isBooleanType() && NumElts == NumEls) \ 4006 return SingletonId; 4007 #include "clang/Basic/RISCVVTypes.def" 4008 } 4009 return QualType(); 4010 } 4011 4012 /// getVectorType - Return the unique reference to a vector type of 4013 /// the specified element type and size. VectorType must be a built-in type. 4014 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4015 VectorKind VecKind) const { 4016 assert(vecType->isBuiltinType() || 4017 (vecType->isBitIntType() && 4018 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4019 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4020 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4021 4022 // Check if we've already instantiated a vector of this type. 4023 llvm::FoldingSetNodeID ID; 4024 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4025 4026 void *InsertPos = nullptr; 4027 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4028 return QualType(VTP, 0); 4029 4030 // If the element type isn't canonical, this won't be a canonical type either, 4031 // so fill in the canonical type field. 4032 QualType Canonical; 4033 if (!vecType.isCanonical()) { 4034 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4035 4036 // Get the new insert position for the node we care about. 
4037 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4038 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4039 } 4040 auto *New = new (*this, alignof(VectorType)) 4041 VectorType(vecType, NumElts, Canonical, VecKind); 4042 VectorTypes.InsertNode(New, InsertPos); 4043 Types.push_back(New); 4044 return QualType(New, 0); 4045 } 4046 4047 QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4048 SourceLocation AttrLoc, 4049 VectorKind VecKind) const { 4050 llvm::FoldingSetNodeID ID; 4051 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4052 VecKind); 4053 void *InsertPos = nullptr; 4054 DependentVectorType *Canon = 4055 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4056 DependentVectorType *New; 4057 4058 if (Canon) { 4059 New = new (*this, alignof(DependentVectorType)) DependentVectorType( 4060 VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4061 } else { 4062 QualType CanonVecTy = getCanonicalType(VecType); 4063 if (CanonVecTy == VecType) { 4064 New = new (*this, alignof(DependentVectorType)) 4065 DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4066 4067 DependentVectorType *CanonCheck = 4068 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4069 assert(!CanonCheck && 4070 "Dependent-sized vector_size canonical type broken"); 4071 (void)CanonCheck; 4072 DependentVectorTypes.InsertNode(New, InsertPos); 4073 } else { 4074 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4075 SourceLocation(), VecKind); 4076 New = new (*this, alignof(DependentVectorType)) 4077 DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4078 } 4079 } 4080 4081 Types.push_back(New); 4082 return QualType(New, 0); 4083 } 4084 4085 /// getExtVectorType - Return the unique reference to an extended vector type of 4086 /// the specified element type and size. VectorType must be a built-in type. 4087 QualType ASTContext::getExtVectorType(QualType vecType, 4088 unsigned NumElts) const { 4089 assert(vecType->isBuiltinType() || vecType->isDependentType() || 4090 (vecType->isBitIntType() && 4091 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4092 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4093 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4094 4095 // Check if we've already instantiated a vector of this type. 4096 llvm::FoldingSetNodeID ID; 4097 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4098 VectorKind::Generic); 4099 void *InsertPos = nullptr; 4100 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4101 return QualType(VTP, 0); 4102 4103 // If the element type isn't canonical, this won't be a canonical type either, 4104 // so fill in the canonical type field. 4105 QualType Canonical; 4106 if (!vecType.isCanonical()) { 4107 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4108 4109 // Get the new insert position for the node we care about. 
4110 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4111 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4112 } 4113 auto *New = new (*this, alignof(ExtVectorType)) 4114 ExtVectorType(vecType, NumElts, Canonical); 4115 VectorTypes.InsertNode(New, InsertPos); 4116 Types.push_back(New); 4117 return QualType(New, 0); 4118 } 4119 4120 QualType 4121 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4122 Expr *SizeExpr, 4123 SourceLocation AttrLoc) const { 4124 llvm::FoldingSetNodeID ID; 4125 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4126 SizeExpr); 4127 4128 void *InsertPos = nullptr; 4129 DependentSizedExtVectorType *Canon 4130 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4131 DependentSizedExtVectorType *New; 4132 if (Canon) { 4133 // We already have a canonical version of this array type; use it as 4134 // the canonical type for a newly-built type. 4135 New = new (*this, alignof(DependentSizedExtVectorType)) 4136 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr, 4137 AttrLoc); 4138 } else { 4139 QualType CanonVecTy = getCanonicalType(vecType); 4140 if (CanonVecTy == vecType) { 4141 New = new (*this, alignof(DependentSizedExtVectorType)) 4142 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc); 4143 4144 DependentSizedExtVectorType *CanonCheck 4145 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4146 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4147 (void)CanonCheck; 4148 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4149 } else { 4150 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4151 SourceLocation()); 4152 New = new (*this, alignof(DependentSizedExtVectorType)) 4153 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc); 4154 } 4155 } 4156 4157 Types.push_back(New); 4158 return QualType(New, 0); 4159 } 4160 4161 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4162 unsigned NumColumns) const { 4163 llvm::FoldingSetNodeID ID; 4164 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4165 Type::ConstantMatrix); 4166 4167 assert(MatrixType::isValidElementType(ElementTy) && 4168 "need a valid element type"); 4169 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4170 ConstantMatrixType::isDimensionValid(NumColumns) && 4171 "need valid matrix dimensions"); 4172 void *InsertPos = nullptr; 4173 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4174 return QualType(MTP, 0); 4175 4176 QualType Canonical; 4177 if (!ElementTy.isCanonical()) { 4178 Canonical = 4179 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4180 4181 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4182 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4183 (void)NewIP; 4184 } 4185 4186 auto *New = new (*this, alignof(ConstantMatrixType)) 4187 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4188 MatrixTypes.InsertNode(New, InsertPos); 4189 Types.push_back(New); 4190 return QualType(New, 0); 4191 } 4192 4193 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4194 Expr *RowExpr, 4195 Expr *ColumnExpr, 4196 SourceLocation AttrLoc) const { 4197 QualType CanonElementTy = getCanonicalType(ElementTy); 4198 llvm::FoldingSetNodeID ID; 4199 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4200 ColumnExpr); 4201 
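  // As with the other dependent types in this file, we first find or create a
  // canonical node keyed on the canonical element type and the row/column
  // expressions, and only wrap a sugar node around it below when the type as
  // written does not exactly match the canonical node.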
  void *InsertPos = nullptr;
  DependentSizedMatrixType *Canon =
      DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, alignof(DependentSizedMatrixType))
        DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
                                 ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // We already have a canonical version of the matrix type.
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for the newly-built type.
  DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
      DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
      DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
4288 llvm::FoldingSetNodeID ID; 4289 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4290 4291 void *InsertPos = nullptr; 4292 if (FunctionNoProtoType *FT = 4293 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4294 return QualType(FT, 0); 4295 4296 QualType Canonical; 4297 if (!isCanonicalResultType(ResultTy)) { 4298 Canonical = 4299 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4300 4301 // Get the new insert position for the node we care about. 4302 FunctionNoProtoType *NewIP = 4303 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4304 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4305 } 4306 4307 auto *New = new (*this, alignof(FunctionNoProtoType)) 4308 FunctionNoProtoType(ResultTy, Canonical, Info); 4309 Types.push_back(New); 4310 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4311 return QualType(New, 0); 4312 } 4313 4314 CanQualType 4315 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4316 CanQualType CanResultType = getCanonicalType(ResultType); 4317 4318 // Canonical result types do not have ARC lifetime qualifiers. 4319 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4320 Qualifiers Qs = CanResultType.getQualifiers(); 4321 Qs.removeObjCLifetime(); 4322 return CanQualType::CreateUnsafe( 4323 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4324 } 4325 4326 return CanResultType; 4327 } 4328 4329 static bool isCanonicalExceptionSpecification( 4330 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4331 if (ESI.Type == EST_None) 4332 return true; 4333 if (!NoexceptInType) 4334 return false; 4335 4336 // C++17 onwards: exception specification is part of the type, as a simple 4337 // boolean "can this function type throw". 4338 if (ESI.Type == EST_BasicNoexcept) 4339 return true; 4340 4341 // A noexcept(expr) specification is (possibly) canonical if expr is 4342 // value-dependent. 4343 if (ESI.Type == EST_DependentNoexcept) 4344 return true; 4345 4346 // A dynamic exception specification is canonical if it only contains pack 4347 // expansions (so we can't tell whether it's non-throwing) and all its 4348 // contained types are canonical. 4349 if (ESI.Type == EST_Dynamic) { 4350 bool AnyPackExpansions = false; 4351 for (QualType ET : ESI.Exceptions) { 4352 if (!ET.isCanonical()) 4353 return false; 4354 if (ET->getAs<PackExpansionType>()) 4355 AnyPackExpansions = true; 4356 } 4357 return AnyPackExpansions; 4358 } 4359 4360 return false; 4361 } 4362 4363 QualType ASTContext::getFunctionTypeInternal( 4364 QualType ResultTy, ArrayRef<QualType> ArgArray, 4365 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4366 size_t NumArgs = ArgArray.size(); 4367 4368 // Unique functions, to guarantee there is only one function of a particular 4369 // structure. 4370 llvm::FoldingSetNodeID ID; 4371 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4372 *this, true); 4373 4374 QualType Canonical; 4375 bool Unique = false; 4376 4377 void *InsertPos = nullptr; 4378 if (FunctionProtoType *FPT = 4379 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4380 QualType Existing = QualType(FPT, 0); 4381 4382 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4383 // it so long as our exception specification doesn't contain a dependent 4384 // noexcept expression, or we're just looking for a canonical type. 4385 // Otherwise, we're going to need to create a type 4386 // sugar node to hold the concrete expression. 
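    // For instance (illustrative): two structurally equivalent but distinct
    // dependent noexcept expressions hash to the same entry here; each caller
    // then gets its own sugar node recording its concrete expression, with
    // the shared node providing the canonical type.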
4387 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4388 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4389 return Existing; 4390 4391 // We need a new type sugar node for this one, to hold the new noexcept 4392 // expression. We do no canonicalization here, but that's OK since we don't 4393 // expect to see the same noexcept expression much more than once. 4394 Canonical = getCanonicalType(Existing); 4395 Unique = true; 4396 } 4397 4398 bool NoexceptInType = getLangOpts().CPlusPlus17; 4399 bool IsCanonicalExceptionSpec = 4400 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4401 4402 // Determine whether the type being created is already canonical or not. 4403 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4404 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4405 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4406 if (!ArgArray[i].isCanonicalAsParam()) 4407 isCanonical = false; 4408 4409 if (OnlyWantCanonical) 4410 assert(isCanonical && 4411 "given non-canonical parameters constructing canonical type"); 4412 4413 // If this type isn't canonical, get the canonical version of it if we don't 4414 // already have it. The exception spec is only partially part of the 4415 // canonical type, and only in C++17 onwards. 4416 if (!isCanonical && Canonical.isNull()) { 4417 SmallVector<QualType, 16> CanonicalArgs; 4418 CanonicalArgs.reserve(NumArgs); 4419 for (unsigned i = 0; i != NumArgs; ++i) 4420 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4421 4422 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4423 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4424 CanonicalEPI.HasTrailingReturn = false; 4425 4426 if (IsCanonicalExceptionSpec) { 4427 // Exception spec is already OK. 4428 } else if (NoexceptInType) { 4429 switch (EPI.ExceptionSpec.Type) { 4430 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4431 // We don't know yet. It shouldn't matter what we pick here; no-one 4432 // should ever look at this. 4433 [[fallthrough]]; 4434 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4435 CanonicalEPI.ExceptionSpec.Type = EST_None; 4436 break; 4437 4438 // A dynamic exception specification is almost always "not noexcept", 4439 // with the exception that a pack expansion might expand to no types. 4440 case EST_Dynamic: { 4441 bool AnyPacks = false; 4442 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4443 if (ET->getAs<PackExpansionType>()) 4444 AnyPacks = true; 4445 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4446 } 4447 if (!AnyPacks) 4448 CanonicalEPI.ExceptionSpec.Type = EST_None; 4449 else { 4450 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4451 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4452 } 4453 break; 4454 } 4455 4456 case EST_DynamicNone: 4457 case EST_BasicNoexcept: 4458 case EST_NoexceptTrue: 4459 case EST_NoThrow: 4460 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4461 break; 4462 4463 case EST_DependentNoexcept: 4464 llvm_unreachable("dependent noexcept is already canonical"); 4465 } 4466 } else { 4467 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4468 } 4469 4470 // Adjust the canonical function result type. 4471 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4472 Canonical = 4473 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4474 4475 // Get the new insert position for the node we care about. 
4476 FunctionProtoType *NewIP = 4477 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4478 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4479 } 4480 4481 // Compute the needed size to hold this FunctionProtoType and the 4482 // various trailing objects. 4483 auto ESH = FunctionProtoType::getExceptionSpecSize( 4484 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4485 size_t Size = FunctionProtoType::totalSizeToAlloc< 4486 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4487 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4488 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4489 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4490 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4491 EPI.ExtParameterInfos ? NumArgs : 0, 4492 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4493 4494 auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType)); 4495 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4496 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4497 Types.push_back(FTP); 4498 if (!Unique) 4499 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4500 return QualType(FTP, 0); 4501 } 4502 4503 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4504 llvm::FoldingSetNodeID ID; 4505 PipeType::Profile(ID, T, ReadOnly); 4506 4507 void *InsertPos = nullptr; 4508 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4509 return QualType(PT, 0); 4510 4511 // If the pipe element type isn't canonical, this won't be a canonical type 4512 // either, so fill in the canonical type field. 4513 QualType Canonical; 4514 if (!T.isCanonical()) { 4515 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4516 4517 // Get the new insert position for the node we care about. 4518 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4519 assert(!NewIP && "Shouldn't be in the map!"); 4520 (void)NewIP; 4521 } 4522 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly); 4523 Types.push_back(New); 4524 PipeTypes.InsertNode(New, InsertPos); 4525 return QualType(New, 0); 4526 } 4527 4528 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4529 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4530 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4531 : Ty; 4532 } 4533 4534 QualType ASTContext::getReadPipeType(QualType T) const { 4535 return getPipeType(T, true); 4536 } 4537 4538 QualType ASTContext::getWritePipeType(QualType T) const { 4539 return getPipeType(T, false); 4540 } 4541 4542 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4543 llvm::FoldingSetNodeID ID; 4544 BitIntType::Profile(ID, IsUnsigned, NumBits); 4545 4546 void *InsertPos = nullptr; 4547 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4548 return QualType(EIT, 0); 4549 4550 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits); 4551 BitIntTypes.InsertNode(New, InsertPos); 4552 Types.push_back(New); 4553 return QualType(New, 0); 4554 } 4555 4556 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4557 Expr *NumBitsExpr) const { 4558 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4559 llvm::FoldingSetNodeID ID; 4560 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4561 4562 void *InsertPos = nullptr; 4563 if (DependentBitIntType *Existing = 4564 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4565 return QualType(Existing, 0); 4566 4567 auto *New = new (*this, alignof(DependentBitIntType)) 4568 DependentBitIntType(IsUnsigned, NumBitsExpr); 4569 DependentBitIntTypes.InsertNode(New, InsertPos); 4570 4571 Types.push_back(New); 4572 return QualType(New, 0); 4573 } 4574 4575 #ifndef NDEBUG 4576 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4577 if (!isa<CXXRecordDecl>(D)) return false; 4578 const auto *RD = cast<CXXRecordDecl>(D); 4579 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4580 return true; 4581 if (RD->getDescribedClassTemplate() && 4582 !isa<ClassTemplateSpecializationDecl>(RD)) 4583 return true; 4584 return false; 4585 } 4586 #endif 4587 4588 /// getInjectedClassNameType - Return the unique reference to the 4589 /// injected class name type for the specified templated declaration. 4590 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4591 QualType TST) const { 4592 assert(NeedsInjectedClassNameType(Decl)); 4593 if (Decl->TypeForDecl) { 4594 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4595 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4596 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4597 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4598 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4599 } else { 4600 Type *newType = new (*this, alignof(InjectedClassNameType)) 4601 InjectedClassNameType(Decl, TST); 4602 Decl->TypeForDecl = newType; 4603 Types.push_back(newType); 4604 } 4605 return QualType(Decl->TypeForDecl, 0); 4606 } 4607 4608 /// getTypeDeclType - Return the unique reference to the type for the 4609 /// specified type declaration. 
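/// For example (illustrative): for 'typedef unsigned size_type;' this yields
/// the uniqued TypedefType for 'size_type' (canonically 'unsigned int'),
/// while for a plain 'struct S' it yields the RecordType for S.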
4610 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4611 assert(Decl && "Passed null for Decl param"); 4612 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4613 4614 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4615 return getTypedefType(Typedef); 4616 4617 assert(!isa<TemplateTypeParmDecl>(Decl) && 4618 "Template type parameter types are always available."); 4619 4620 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4621 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4622 assert(!NeedsInjectedClassNameType(Record)); 4623 return getRecordType(Record); 4624 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4625 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4626 return getEnumType(Enum); 4627 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4628 return getUnresolvedUsingType(Using); 4629 } else 4630 llvm_unreachable("TypeDecl without a type?"); 4631 4632 return QualType(Decl->TypeForDecl, 0); 4633 } 4634 4635 /// getTypedefType - Return the unique reference to the type for the 4636 /// specified typedef name decl. 4637 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4638 QualType Underlying) const { 4639 if (!Decl->TypeForDecl) { 4640 if (Underlying.isNull()) 4641 Underlying = Decl->getUnderlyingType(); 4642 auto *NewType = new (*this, alignof(TypedefType)) TypedefType( 4643 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying)); 4644 Decl->TypeForDecl = NewType; 4645 Types.push_back(NewType); 4646 return QualType(NewType, 0); 4647 } 4648 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying) 4649 return QualType(Decl->TypeForDecl, 0); 4650 assert(hasSameType(Decl->getUnderlyingType(), Underlying)); 4651 4652 llvm::FoldingSetNodeID ID; 4653 TypedefType::Profile(ID, Decl, Underlying); 4654 4655 void *InsertPos = nullptr; 4656 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4657 assert(!T->typeMatchesDecl() && 4658 "non-divergent case should be handled with TypeDecl"); 4659 return QualType(T, 0); 4660 } 4661 4662 void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true), 4663 alignof(TypedefType)); 4664 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying, 4665 getCanonicalType(Underlying)); 4666 TypedefTypes.InsertNode(NewType, InsertPos); 4667 Types.push_back(NewType); 4668 return QualType(NewType, 0); 4669 } 4670 4671 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4672 QualType Underlying) const { 4673 llvm::FoldingSetNodeID ID; 4674 UsingType::Profile(ID, Found, Underlying); 4675 4676 void *InsertPos = nullptr; 4677 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos)) 4678 return QualType(T, 0); 4679 4680 const Type *TypeForDecl = 4681 cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(); 4682 4683 assert(!Underlying.hasLocalQualifiers()); 4684 QualType Canon = Underlying->getCanonicalTypeInternal(); 4685 assert(TypeForDecl->getCanonicalTypeInternal() == Canon); 4686 4687 if (Underlying.getTypePtr() == TypeForDecl) 4688 Underlying = QualType(); 4689 void *Mem = 4690 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()), 4691 alignof(UsingType)); 4692 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon); 4693 Types.push_back(NewType); 4694 UsingTypes.InsertNode(NewType, InsertPos); 4695 return QualType(NewType, 0); 4696 } 4697 4698 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 
4699 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4700 4701 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4702 if (PrevDecl->TypeForDecl) 4703 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4704 4705 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl); 4706 Decl->TypeForDecl = newType; 4707 Types.push_back(newType); 4708 return QualType(newType, 0); 4709 } 4710 4711 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4712 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4713 4714 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4715 if (PrevDecl->TypeForDecl) 4716 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4717 4718 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl); 4719 Decl->TypeForDecl = newType; 4720 Types.push_back(newType); 4721 return QualType(newType, 0); 4722 } 4723 4724 QualType ASTContext::getUnresolvedUsingType( 4725 const UnresolvedUsingTypenameDecl *Decl) const { 4726 if (Decl->TypeForDecl) 4727 return QualType(Decl->TypeForDecl, 0); 4728 4729 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4730 Decl->getCanonicalDecl()) 4731 if (CanonicalDecl->TypeForDecl) 4732 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4733 4734 Type *newType = 4735 new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl); 4736 Decl->TypeForDecl = newType; 4737 Types.push_back(newType); 4738 return QualType(newType, 0); 4739 } 4740 4741 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4742 QualType modifiedType, 4743 QualType equivalentType) const { 4744 llvm::FoldingSetNodeID id; 4745 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4746 4747 void *insertPos = nullptr; 4748 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4749 if (type) return QualType(type, 0); 4750 4751 QualType canon = getCanonicalType(equivalentType); 4752 type = new (*this, alignof(AttributedType)) 4753 AttributedType(canon, attrKind, modifiedType, equivalentType); 4754 4755 Types.push_back(type); 4756 AttributedTypes.InsertNode(type, insertPos); 4757 4758 return QualType(type, 0); 4759 } 4760 4761 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4762 QualType Wrapped) { 4763 llvm::FoldingSetNodeID ID; 4764 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4765 4766 void *InsertPos = nullptr; 4767 BTFTagAttributedType *Ty = 4768 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4769 if (Ty) 4770 return QualType(Ty, 0); 4771 4772 QualType Canon = getCanonicalType(Wrapped); 4773 Ty = new (*this, alignof(BTFTagAttributedType)) 4774 BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4775 4776 Types.push_back(Ty); 4777 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4778 4779 return QualType(Ty, 0); 4780 } 4781 4782 /// Retrieve a substitution-result type. 
4783 QualType ASTContext::getSubstTemplateTypeParmType( 4784 QualType Replacement, Decl *AssociatedDecl, unsigned Index, 4785 std::optional<unsigned> PackIndex) const { 4786 llvm::FoldingSetNodeID ID; 4787 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, 4788 PackIndex); 4789 void *InsertPos = nullptr; 4790 SubstTemplateTypeParmType *SubstParm = 4791 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4792 4793 if (!SubstParm) { 4794 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( 4795 !Replacement.isCanonical()), 4796 alignof(SubstTemplateTypeParmType)); 4797 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, 4798 Index, PackIndex); 4799 Types.push_back(SubstParm); 4800 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 4801 } 4802 4803 return QualType(SubstParm, 0); 4804 } 4805 4806 /// Retrieve a 4807 QualType 4808 ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, 4809 unsigned Index, bool Final, 4810 const TemplateArgument &ArgPack) { 4811 #ifndef NDEBUG 4812 for (const auto &P : ArgPack.pack_elements()) 4813 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); 4814 #endif 4815 4816 llvm::FoldingSetNodeID ID; 4817 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, 4818 ArgPack); 4819 void *InsertPos = nullptr; 4820 if (SubstTemplateTypeParmPackType *SubstParm = 4821 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 4822 return QualType(SubstParm, 0); 4823 4824 QualType Canon; 4825 { 4826 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); 4827 if (!AssociatedDecl->isCanonicalDecl() || 4828 !CanonArgPack.structurallyEquals(ArgPack)) { 4829 Canon = getSubstTemplateTypeParmPackType( 4830 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); 4831 [[maybe_unused]] const auto *Nothing = 4832 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 4833 assert(!Nothing); 4834 } 4835 } 4836 4837 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType)) 4838 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final, 4839 ArgPack); 4840 Types.push_back(SubstParm); 4841 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 4842 return QualType(SubstParm, 0); 4843 } 4844 4845 /// Retrieve the template type parameter type for a template 4846 /// parameter or parameter pack with the given depth, index, and (optionally) 4847 /// name. 
4848 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index, 4849 bool ParameterPack, 4850 TemplateTypeParmDecl *TTPDecl) const { 4851 llvm::FoldingSetNodeID ID; 4852 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl); 4853 void *InsertPos = nullptr; 4854 TemplateTypeParmType *TypeParm 4855 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4856 4857 if (TypeParm) 4858 return QualType(TypeParm, 0); 4859 4860 if (TTPDecl) { 4861 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack); 4862 TypeParm = new (*this, alignof(TemplateTypeParmType)) 4863 TemplateTypeParmType(TTPDecl, Canon); 4864 4865 TemplateTypeParmType *TypeCheck 4866 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4867 assert(!TypeCheck && "Template type parameter canonical type broken"); 4868 (void)TypeCheck; 4869 } else 4870 TypeParm = new (*this, alignof(TemplateTypeParmType)) 4871 TemplateTypeParmType(Depth, Index, ParameterPack); 4872 4873 Types.push_back(TypeParm); 4874 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos); 4875 4876 return QualType(TypeParm, 0); 4877 } 4878 4879 TypeSourceInfo * 4880 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name, 4881 SourceLocation NameLoc, 4882 const TemplateArgumentListInfo &Args, 4883 QualType Underlying) const { 4884 assert(!Name.getAsDependentTemplateName() && 4885 "No dependent template names here!"); 4886 QualType TST = 4887 getTemplateSpecializationType(Name, Args.arguments(), Underlying); 4888 4889 TypeSourceInfo *DI = CreateTypeSourceInfo(TST); 4890 TemplateSpecializationTypeLoc TL = 4891 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>(); 4892 TL.setTemplateKeywordLoc(SourceLocation()); 4893 TL.setTemplateNameLoc(NameLoc); 4894 TL.setLAngleLoc(Args.getLAngleLoc()); 4895 TL.setRAngleLoc(Args.getRAngleLoc()); 4896 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) 4897 TL.setArgLocInfo(i, Args[i].getLocInfo()); 4898 return DI; 4899 } 4900 4901 QualType 4902 ASTContext::getTemplateSpecializationType(TemplateName Template, 4903 ArrayRef<TemplateArgumentLoc> Args, 4904 QualType Underlying) const { 4905 assert(!Template.getAsDependentTemplateName() && 4906 "No dependent template names here!"); 4907 4908 SmallVector<TemplateArgument, 4> ArgVec; 4909 ArgVec.reserve(Args.size()); 4910 for (const TemplateArgumentLoc &Arg : Args) 4911 ArgVec.push_back(Arg.getArgument()); 4912 4913 return getTemplateSpecializationType(Template, ArgVec, Underlying); 4914 } 4915 4916 #ifndef NDEBUG 4917 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) { 4918 for (const TemplateArgument &Arg : Args) 4919 if (Arg.isPackExpansion()) 4920 return true; 4921 4922 return true; 4923 } 4924 #endif 4925 4926 QualType 4927 ASTContext::getTemplateSpecializationType(TemplateName Template, 4928 ArrayRef<TemplateArgument> Args, 4929 QualType Underlying) const { 4930 assert(!Template.getAsDependentTemplateName() && 4931 "No dependent template names here!"); 4932 // Look through qualified template names. 4933 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4934 Template = QTN->getUnderlyingTemplate(); 4935 4936 const auto *TD = Template.getAsTemplateDecl(); 4937 bool IsTypeAlias = TD && TD->isTypeAlias(); 4938 QualType CanonType; 4939 if (!Underlying.isNull()) 4940 CanonType = getCanonicalType(Underlying); 4941 else { 4942 // We can get here with an alias template when the specialization contains 4943 // a pack expansion that does not match up with a parameter pack. 
4944 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4945 "Caller must compute aliased type"); 4946 IsTypeAlias = false; 4947 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4948 } 4949 4950 // Allocate the (non-canonical) template specialization type, but don't 4951 // try to unique it: these types typically have location information that 4952 // we don't unique and don't want to lose. 4953 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4954 sizeof(TemplateArgument) * Args.size() + 4955 (IsTypeAlias ? sizeof(QualType) : 0), 4956 alignof(TemplateSpecializationType)); 4957 auto *Spec 4958 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4959 IsTypeAlias ? Underlying : QualType()); 4960 4961 Types.push_back(Spec); 4962 return QualType(Spec, 0); 4963 } 4964 4965 QualType ASTContext::getCanonicalTemplateSpecializationType( 4966 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4967 assert(!Template.getAsDependentTemplateName() && 4968 "No dependent template names here!"); 4969 4970 // Look through qualified template names. 4971 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4972 Template = TemplateName(QTN->getUnderlyingTemplate()); 4973 4974 // Build the canonical template specialization type. 4975 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4976 bool AnyNonCanonArgs = false; 4977 auto CanonArgs = 4978 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 4979 4980 // Determine whether this canonical template specialization type already 4981 // exists. 4982 llvm::FoldingSetNodeID ID; 4983 TemplateSpecializationType::Profile(ID, CanonTemplate, 4984 CanonArgs, *this); 4985 4986 void *InsertPos = nullptr; 4987 TemplateSpecializationType *Spec 4988 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4989 4990 if (!Spec) { 4991 // Allocate a new canonical template specialization type. 
4992 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4993 sizeof(TemplateArgument) * CanonArgs.size()), 4994 alignof(TemplateSpecializationType)); 4995 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 4996 CanonArgs, 4997 QualType(), QualType()); 4998 Types.push_back(Spec); 4999 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 5000 } 5001 5002 assert(Spec->isDependentType() && 5003 "Non-dependent template-id type must have a canonical type"); 5004 return QualType(Spec, 0); 5005 } 5006 5007 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5008 NestedNameSpecifier *NNS, 5009 QualType NamedType, 5010 TagDecl *OwnedTagDecl) const { 5011 llvm::FoldingSetNodeID ID; 5012 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5013 5014 void *InsertPos = nullptr; 5015 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5016 if (T) 5017 return QualType(T, 0); 5018 5019 QualType Canon = NamedType; 5020 if (!Canon.isCanonical()) { 5021 Canon = getCanonicalType(NamedType); 5022 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5023 assert(!CheckT && "Elaborated canonical type broken"); 5024 (void)CheckT; 5025 } 5026 5027 void *Mem = 5028 Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5029 alignof(ElaboratedType)); 5030 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5031 5032 Types.push_back(T); 5033 ElaboratedTypes.InsertNode(T, InsertPos); 5034 return QualType(T, 0); 5035 } 5036 5037 QualType 5038 ASTContext::getParenType(QualType InnerType) const { 5039 llvm::FoldingSetNodeID ID; 5040 ParenType::Profile(ID, InnerType); 5041 5042 void *InsertPos = nullptr; 5043 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5044 if (T) 5045 return QualType(T, 0); 5046 5047 QualType Canon = InnerType; 5048 if (!Canon.isCanonical()) { 5049 Canon = getCanonicalType(InnerType); 5050 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5051 assert(!CheckT && "Paren canonical type broken"); 5052 (void)CheckT; 5053 } 5054 5055 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon); 5056 Types.push_back(T); 5057 ParenTypes.InsertNode(T, InsertPos); 5058 return QualType(T, 0); 5059 } 5060 5061 QualType 5062 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5063 const IdentifierInfo *MacroII) const { 5064 QualType Canon = UnderlyingTy; 5065 if (!Canon.isCanonical()) 5066 Canon = getCanonicalType(UnderlyingTy); 5067 5068 auto *newType = new (*this, alignof(MacroQualifiedType)) 5069 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5070 Types.push_back(newType); 5071 return QualType(newType, 0); 5072 } 5073 5074 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5075 NestedNameSpecifier *NNS, 5076 const IdentifierInfo *Name, 5077 QualType Canon) const { 5078 if (Canon.isNull()) { 5079 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5080 if (CanonNNS != NNS) 5081 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5082 } 5083 5084 llvm::FoldingSetNodeID ID; 5085 DependentNameType::Profile(ID, Keyword, NNS, Name); 5086 5087 void *InsertPos = nullptr; 5088 DependentNameType *T 5089 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5090 if (T) 5091 return QualType(T, 0); 5092 5093 T = new (*this, alignof(DependentNameType)) 5094 DependentNameType(Keyword, NNS, Name, Canon); 5095 Types.push_back(T); 5096 DependentNameTypes.InsertNode(T, InsertPos); 5097 return QualType(T, 0); 
5098 } 5099 5100 QualType ASTContext::getDependentTemplateSpecializationType( 5101 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5102 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5103 // TODO: avoid this copy 5104 SmallVector<TemplateArgument, 16> ArgCopy; 5105 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5106 ArgCopy.push_back(Args[I].getArgument()); 5107 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5108 } 5109 5110 QualType 5111 ASTContext::getDependentTemplateSpecializationType( 5112 ElaboratedTypeKeyword Keyword, 5113 NestedNameSpecifier *NNS, 5114 const IdentifierInfo *Name, 5115 ArrayRef<TemplateArgument> Args) const { 5116 assert((!NNS || NNS->isDependent()) && 5117 "nested-name-specifier must be dependent"); 5118 5119 llvm::FoldingSetNodeID ID; 5120 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5121 Name, Args); 5122 5123 void *InsertPos = nullptr; 5124 DependentTemplateSpecializationType *T 5125 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5126 if (T) 5127 return QualType(T, 0); 5128 5129 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5130 5131 ElaboratedTypeKeyword CanonKeyword = Keyword; 5132 if (Keyword == ElaboratedTypeKeyword::None) 5133 CanonKeyword = ElaboratedTypeKeyword::Typename; 5134 5135 bool AnyNonCanonArgs = false; 5136 auto CanonArgs = 5137 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5138 5139 QualType Canon; 5140 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5141 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5142 Name, 5143 CanonArgs); 5144 5145 // Find the insert position again. 5146 [[maybe_unused]] auto *Nothing = 5147 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5148 assert(!Nothing && "canonical type broken"); 5149 } 5150 5151 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5152 sizeof(TemplateArgument) * Args.size()), 5153 alignof(DependentTemplateSpecializationType)); 5154 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5155 Name, Args, Canon); 5156 Types.push_back(T); 5157 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5158 return QualType(T, 0); 5159 } 5160 5161 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5162 TemplateArgument Arg; 5163 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5164 QualType ArgType = getTypeDeclType(TTP); 5165 if (TTP->isParameterPack()) 5166 ArgType = getPackExpansionType(ArgType, std::nullopt); 5167 5168 Arg = TemplateArgument(ArgType); 5169 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5170 QualType T = 5171 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5172 // For class NTTPs, ensure we include the 'const' so the type matches that 5173 // of a real template argument. 5174 // FIXME: It would be more faithful to model this as something like an 5175 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
5176 if (T->isRecordType()) 5177 T.addConst(); 5178 Expr *E = new (*this) DeclRefExpr( 5179 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5180 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5181 5182 if (NTTP->isParameterPack()) 5183 E = new (*this) 5184 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5185 Arg = TemplateArgument(E); 5186 } else { 5187 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5188 if (TTP->isParameterPack()) 5189 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); 5190 else 5191 Arg = TemplateArgument(TemplateName(TTP)); 5192 } 5193 5194 if (Param->isTemplateParameterPack()) 5195 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5196 5197 return Arg; 5198 } 5199 5200 void 5201 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5202 SmallVectorImpl<TemplateArgument> &Args) { 5203 Args.reserve(Args.size() + Params->size()); 5204 5205 for (NamedDecl *Param : *Params) 5206 Args.push_back(getInjectedTemplateArg(Param)); 5207 } 5208 5209 QualType ASTContext::getPackExpansionType(QualType Pattern, 5210 std::optional<unsigned> NumExpansions, 5211 bool ExpectPackInType) { 5212 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5213 "Pack expansions must expand one or more parameter packs"); 5214 5215 llvm::FoldingSetNodeID ID; 5216 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5217 5218 void *InsertPos = nullptr; 5219 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5220 if (T) 5221 return QualType(T, 0); 5222 5223 QualType Canon; 5224 if (!Pattern.isCanonical()) { 5225 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5226 /*ExpectPackInType=*/false); 5227 5228 // Find the insert position again, in case we inserted an element into 5229 // PackExpansionTypes and invalidated our insert position. 5230 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5231 } 5232 5233 T = new (*this, alignof(PackExpansionType)) 5234 PackExpansionType(Pattern, Canon, NumExpansions); 5235 Types.push_back(T); 5236 PackExpansionTypes.InsertNode(T, InsertPos); 5237 return QualType(T, 0); 5238 } 5239 5240 /// CmpProtocolNames - Comparison predicate for sorting protocols 5241 /// alphabetically. 5242 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5243 ObjCProtocolDecl *const *RHS) { 5244 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5245 } 5246 5247 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5248 if (Protocols.empty()) return true; 5249 5250 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5251 return false; 5252 5253 for (unsigned i = 1; i != Protocols.size(); ++i) 5254 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5255 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5256 return false; 5257 return true; 5258 } 5259 5260 static void 5261 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5262 // Sort protocols, keyed by name. 5263 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5264 5265 // Canonicalize. 5266 for (ObjCProtocolDecl *&P : Protocols) 5267 P = P->getCanonicalDecl(); 5268 5269 // Remove duplicates. 
5270 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5271 Protocols.erase(ProtocolsEnd, Protocols.end()); 5272 } 5273 5274 QualType ASTContext::getObjCObjectType(QualType BaseType, 5275 ObjCProtocolDecl * const *Protocols, 5276 unsigned NumProtocols) const { 5277 return getObjCObjectType(BaseType, {}, 5278 llvm::ArrayRef(Protocols, NumProtocols), 5279 /*isKindOf=*/false); 5280 } 5281 5282 QualType ASTContext::getObjCObjectType( 5283 QualType baseType, 5284 ArrayRef<QualType> typeArgs, 5285 ArrayRef<ObjCProtocolDecl *> protocols, 5286 bool isKindOf) const { 5287 // If the base type is an interface and there aren't any protocols or 5288 // type arguments to add, then the interface type will do just fine. 5289 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5290 isa<ObjCInterfaceType>(baseType)) 5291 return baseType; 5292 5293 // Look in the folding set for an existing type. 5294 llvm::FoldingSetNodeID ID; 5295 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5296 void *InsertPos = nullptr; 5297 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5298 return QualType(QT, 0); 5299 5300 // Determine the type arguments to be used for canonicalization, 5301 // which may be explicitly specified here or written on the base 5302 // type. 5303 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5304 if (effectiveTypeArgs.empty()) { 5305 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5306 effectiveTypeArgs = baseObject->getTypeArgs(); 5307 } 5308 5309 // Build the canonical type, which has the canonical base type and a 5310 // sorted-and-uniqued list of protocols and the type arguments 5311 // canonicalized. 5312 QualType canonical; 5313 bool typeArgsAreCanonical = llvm::all_of( 5314 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5315 bool protocolsSorted = areSortedAndUniqued(protocols); 5316 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5317 // Determine the canonical type arguments. 5318 ArrayRef<QualType> canonTypeArgs; 5319 SmallVector<QualType, 4> canonTypeArgsVec; 5320 if (!typeArgsAreCanonical) { 5321 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5322 for (auto typeArg : effectiveTypeArgs) 5323 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5324 canonTypeArgs = canonTypeArgsVec; 5325 } else { 5326 canonTypeArgs = effectiveTypeArgs; 5327 } 5328 5329 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5330 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5331 if (!protocolsSorted) { 5332 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5333 SortAndUniqueProtocols(canonProtocolsVec); 5334 canonProtocols = canonProtocolsVec; 5335 } else { 5336 canonProtocols = protocols; 5337 } 5338 5339 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5340 canonProtocols, isKindOf); 5341 5342 // Regenerate InsertPos. 5343 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5344 } 5345 5346 unsigned size = sizeof(ObjCObjectTypeImpl); 5347 size += typeArgs.size() * sizeof(QualType); 5348 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5349 void *mem = Allocate(size, alignof(ObjCObjectTypeImpl)); 5350 auto *T = 5351 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5352 isKindOf); 5353 5354 Types.push_back(T); 5355 ObjCObjectTypes.InsertNode(T, InsertPos); 5356 return QualType(T, 0); 5357 } 5358 5359 /// Apply Objective-C protocol qualifiers to the given type. 
5360 /// If this is for the canonical type of a type parameter, we can apply 5361 /// protocol qualifiers on the ObjCObjectPointerType. 5362 QualType 5363 ASTContext::applyObjCProtocolQualifiers(QualType type, 5364 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5365 bool allowOnPointerType) const { 5366 hasError = false; 5367 5368 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5369 return getObjCTypeParamType(objT->getDecl(), protocols); 5370 } 5371 5372 // Apply protocol qualifiers to ObjCObjectPointerType. 5373 if (allowOnPointerType) { 5374 if (const auto *objPtr = 5375 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5376 const ObjCObjectType *objT = objPtr->getObjectType(); 5377 // Merge protocol lists and construct ObjCObjectType. 5378 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5379 protocolsVec.append(objT->qual_begin(), 5380 objT->qual_end()); 5381 protocolsVec.append(protocols.begin(), protocols.end()); 5382 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5383 type = getObjCObjectType( 5384 objT->getBaseType(), 5385 objT->getTypeArgsAsWritten(), 5386 protocols, 5387 objT->isKindOfTypeAsWritten()); 5388 return getObjCObjectPointerType(type); 5389 } 5390 } 5391 5392 // Apply protocol qualifiers to ObjCObjectType. 5393 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5394 // FIXME: Check for protocols to which the class type is already 5395 // known to conform. 5396 5397 return getObjCObjectType(objT->getBaseType(), 5398 objT->getTypeArgsAsWritten(), 5399 protocols, 5400 objT->isKindOfTypeAsWritten()); 5401 } 5402 5403 // If the canonical type is ObjCObjectType, ... 5404 if (type->isObjCObjectType()) { 5405 // Silently overwrite any existing protocol qualifiers. 5406 // TODO: determine whether that's the right thing to do. 5407 5408 // FIXME: Check for protocols to which the class type is already 5409 // known to conform. 5410 return getObjCObjectType(type, {}, protocols, false); 5411 } 5412 5413 // id<protocol-list> 5414 if (type->isObjCIdType()) { 5415 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5416 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5417 objPtr->isKindOfType()); 5418 return getObjCObjectPointerType(type); 5419 } 5420 5421 // Class<protocol-list> 5422 if (type->isObjCClassType()) { 5423 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5424 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5425 objPtr->isKindOfType()); 5426 return getObjCObjectPointerType(type); 5427 } 5428 5429 hasError = true; 5430 return type; 5431 } 5432 5433 QualType 5434 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5435 ArrayRef<ObjCProtocolDecl *> protocols) const { 5436 // Look in the folding set for an existing type. 5437 llvm::FoldingSetNodeID ID; 5438 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5439 void *InsertPos = nullptr; 5440 if (ObjCTypeParamType *TypeParam = 5441 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5442 return QualType(TypeParam, 0); 5443 5444 // We canonicalize to the underlying type. 5445 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5446 if (!protocols.empty()) { 5447 // Apply the protocol qualifers. 
5448 bool hasError; 5449 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5450 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5451 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5452 } 5453 5454 unsigned size = sizeof(ObjCTypeParamType); 5455 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5456 void *mem = Allocate(size, alignof(ObjCTypeParamType)); 5457 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5458 5459 Types.push_back(newType); 5460 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5461 return QualType(newType, 0); 5462 } 5463 5464 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5465 ObjCTypeParamDecl *New) const { 5466 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5467 // Update TypeForDecl after updating TypeSourceInfo. 5468 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5469 SmallVector<ObjCProtocolDecl *, 8> protocols; 5470 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5471 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5472 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5473 } 5474 5475 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5476 /// protocol list adopt all protocols in QT's qualified-id protocol 5477 /// list. 5478 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5479 ObjCInterfaceDecl *IC) { 5480 if (!QT->isObjCQualifiedIdType()) 5481 return false; 5482 5483 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5484 // If both the right and left sides have qualifiers. 5485 for (auto *Proto : OPT->quals()) { 5486 if (!IC->ClassImplementsProtocol(Proto, false)) 5487 return false; 5488 } 5489 return true; 5490 } 5491 return false; 5492 } 5493 5494 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5495 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5496 /// of protocols. 5497 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5498 ObjCInterfaceDecl *IDecl) { 5499 if (!QT->isObjCQualifiedIdType()) 5500 return false; 5501 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5502 if (!OPT) 5503 return false; 5504 if (!IDecl->hasDefinition()) 5505 return false; 5506 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5507 CollectInheritedProtocols(IDecl, InheritedProtocols); 5508 if (InheritedProtocols.empty()) 5509 return false; 5510 // Check that if every protocol in list of id<plist> conforms to a protocol 5511 // of IDecl's, then bridge casting is ok. 5512 bool Conforms = false; 5513 for (auto *Proto : OPT->quals()) { 5514 Conforms = false; 5515 for (auto *PI : InheritedProtocols) { 5516 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5517 Conforms = true; 5518 break; 5519 } 5520 } 5521 if (!Conforms) 5522 break; 5523 } 5524 if (Conforms) 5525 return true; 5526 5527 for (auto *PI : InheritedProtocols) { 5528 // If both the right and left sides have qualifiers. 5529 bool Adopts = false; 5530 for (auto *Proto : OPT->quals()) { 5531 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5532 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5533 break; 5534 } 5535 if (!Adopts) 5536 return false; 5537 } 5538 return true; 5539 } 5540 5541 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5542 /// the given object type. 
5543 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5544 llvm::FoldingSetNodeID ID; 5545 ObjCObjectPointerType::Profile(ID, ObjectT); 5546 5547 void *InsertPos = nullptr; 5548 if (ObjCObjectPointerType *QT = 5549 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5550 return QualType(QT, 0); 5551 5552 // Find the canonical object type. 5553 QualType Canonical; 5554 if (!ObjectT.isCanonical()) { 5555 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5556 5557 // Regenerate InsertPos. 5558 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5559 } 5560 5561 // No match. 5562 void *Mem = 5563 Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType)); 5564 auto *QType = 5565 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5566 5567 Types.push_back(QType); 5568 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5569 return QualType(QType, 0); 5570 } 5571 5572 /// getObjCInterfaceType - Return the unique reference to the type for the 5573 /// specified ObjC interface decl. The list of protocols is optional. 5574 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5575 ObjCInterfaceDecl *PrevDecl) const { 5576 if (Decl->TypeForDecl) 5577 return QualType(Decl->TypeForDecl, 0); 5578 5579 if (PrevDecl) { 5580 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5581 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5582 return QualType(PrevDecl->TypeForDecl, 0); 5583 } 5584 5585 // Prefer the definition, if there is one. 5586 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5587 Decl = Def; 5588 5589 void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType)); 5590 auto *T = new (Mem) ObjCInterfaceType(Decl); 5591 Decl->TypeForDecl = T; 5592 Types.push_back(T); 5593 return QualType(T, 0); 5594 } 5595 5596 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5597 /// TypeOfExprType AST's (since expression's are never shared). For example, 5598 /// multiple declarations that refer to "typeof(x)" all contain different 5599 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5600 /// on canonical type's (which are always unique). 5601 QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { 5602 TypeOfExprType *toe; 5603 if (tofExpr->isTypeDependent()) { 5604 llvm::FoldingSetNodeID ID; 5605 DependentTypeOfExprType::Profile(ID, *this, tofExpr, 5606 Kind == TypeOfKind::Unqualified); 5607 5608 void *InsertPos = nullptr; 5609 DependentTypeOfExprType *Canon = 5610 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5611 if (Canon) { 5612 // We already have a "canonical" version of an identical, dependent 5613 // typeof(expr) type. Use that as our canonical type. 5614 toe = new (*this, alignof(TypeOfExprType)) 5615 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); 5616 } else { 5617 // Build a new, canonical typeof(expr) type. 5618 Canon = new (*this, alignof(DependentTypeOfExprType)) 5619 DependentTypeOfExprType(tofExpr, Kind); 5620 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5621 toe = Canon; 5622 } 5623 } else { 5624 QualType Canonical = getCanonicalType(tofExpr->getType()); 5625 toe = new (*this, alignof(TypeOfExprType)) 5626 TypeOfExprType(tofExpr, Kind, Canonical); 5627 } 5628 Types.push_back(toe); 5629 return QualType(toe, 0); 5630 } 5631 5632 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5633 /// TypeOfType nodes. 
The only motivation to unique these nodes would be 5634 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5635 /// an issue. This doesn't affect the type checker, since it operates 5636 /// on canonical types (which are always unique). 5637 QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { 5638 QualType Canonical = getCanonicalType(tofType); 5639 auto *tot = 5640 new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind); 5641 Types.push_back(tot); 5642 return QualType(tot, 0); 5643 } 5644 5645 /// getReferenceQualifiedType - Given an expr, will return the type for 5646 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5647 /// and class member access into account. 5648 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5649 // C++11 [dcl.type.simple]p4: 5650 // [...] 5651 QualType T = E->getType(); 5652 switch (E->getValueKind()) { 5653 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5654 // type of e; 5655 case VK_XValue: 5656 return getRValueReferenceType(T); 5657 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5658 // type of e; 5659 case VK_LValue: 5660 return getLValueReferenceType(T); 5661 // - otherwise, decltype(e) is the type of e. 5662 case VK_PRValue: 5663 return T; 5664 } 5665 llvm_unreachable("Unknown value kind"); 5666 } 5667 5668 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5669 /// nodes. This would never be helpful, since each such type has its own 5670 /// expression, and would not give a significant memory saving, since there 5671 /// is an Expr tree under each such type. 5672 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5673 DecltypeType *dt; 5674 5675 // C++11 [temp.type]p2: 5676 // If an expression e involves a template parameter, decltype(e) denotes a 5677 // unique dependent type. Two such decltype-specifiers refer to the same 5678 // type only if their expressions are equivalent (14.5.6.1). 5679 if (e->isInstantiationDependent()) { 5680 llvm::FoldingSetNodeID ID; 5681 DependentDecltypeType::Profile(ID, *this, e); 5682 5683 void *InsertPos = nullptr; 5684 DependentDecltypeType *Canon 5685 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5686 if (!Canon) { 5687 // Build a new, canonical decltype(expr) type. 5688 Canon = new (*this, alignof(DependentDecltypeType)) 5689 DependentDecltypeType(e, DependentTy); 5690 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5691 } 5692 dt = new (*this, alignof(DecltypeType)) 5693 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5694 } else { 5695 dt = new (*this, alignof(DecltypeType)) 5696 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5697 } 5698 Types.push_back(dt); 5699 return QualType(dt, 0); 5700 } 5701 5702 /// getUnaryTransformationType - We don't unique these, since the memory 5703 /// savings are minimal and these are rare. 5704 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5705 QualType UnderlyingType, 5706 UnaryTransformType::UTTKind Kind) 5707 const { 5708 UnaryTransformType *ut = nullptr; 5709 5710 if (BaseType->isDependentType()) { 5711 // Look in the folding set for an existing type. 
5712 llvm::FoldingSetNodeID ID; 5713 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5714 5715 void *InsertPos = nullptr; 5716 DependentUnaryTransformType *Canon 5717 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5718 5719 if (!Canon) { 5720 // Build a new, canonical __underlying_type(type) type. 5721 Canon = new (*this, alignof(DependentUnaryTransformType)) 5722 DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind); 5723 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5724 } 5725 ut = new (*this, alignof(UnaryTransformType)) 5726 UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0)); 5727 } else { 5728 QualType CanonType = getCanonicalType(UnderlyingType); 5729 ut = new (*this, alignof(UnaryTransformType)) 5730 UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType); 5731 } 5732 Types.push_back(ut); 5733 return QualType(ut, 0); 5734 } 5735 5736 QualType ASTContext::getAutoTypeInternal( 5737 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5738 bool IsPack, ConceptDecl *TypeConstraintConcept, 5739 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5740 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5741 !TypeConstraintConcept && !IsDependent) 5742 return getAutoDeductType(); 5743 5744 // Look in the folding set for an existing type. 5745 void *InsertPos = nullptr; 5746 llvm::FoldingSetNodeID ID; 5747 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5748 TypeConstraintConcept, TypeConstraintArgs); 5749 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5750 return QualType(AT, 0); 5751 5752 QualType Canon; 5753 if (!IsCanon) { 5754 if (!DeducedType.isNull()) { 5755 Canon = DeducedType.getCanonicalType(); 5756 } else if (TypeConstraintConcept) { 5757 bool AnyNonCanonArgs = false; 5758 ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl(); 5759 auto CanonicalConceptArgs = ::getCanonicalTemplateArguments( 5760 *this, TypeConstraintArgs, AnyNonCanonArgs); 5761 if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) { 5762 Canon = 5763 getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5764 CanonicalConcept, CanonicalConceptArgs, true); 5765 // Find the insert position again. 5766 [[maybe_unused]] auto *Nothing = 5767 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5768 assert(!Nothing && "canonical type broken"); 5769 } 5770 } 5771 } 5772 5773 void *Mem = Allocate(sizeof(AutoType) + 5774 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5775 alignof(AutoType)); 5776 auto *AT = new (Mem) AutoType( 5777 DeducedType, Keyword, 5778 (IsDependent ? TypeDependence::DependentInstantiation 5779 : TypeDependence::None) | 5780 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5781 Canon, TypeConstraintConcept, TypeConstraintArgs); 5782 Types.push_back(AT); 5783 AutoTypes.InsertNode(AT, InsertPos); 5784 return QualType(AT, 0); 5785 } 5786 5787 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5788 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5789 /// canonical deduced-but-dependent 'auto' type. 
5790 QualType 5791 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5792 bool IsDependent, bool IsPack, 5793 ConceptDecl *TypeConstraintConcept, 5794 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5795 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5796 assert((!IsDependent || DeducedType.isNull()) && 5797 "A dependent auto should be undeduced"); 5798 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5799 TypeConstraintConcept, TypeConstraintArgs); 5800 } 5801 5802 QualType ASTContext::getUnconstrainedType(QualType T) const { 5803 QualType CanonT = T.getCanonicalType(); 5804 5805 // Remove a type-constraint from a top-level auto or decltype(auto). 5806 if (auto *AT = CanonT->getAs<AutoType>()) { 5807 if (!AT->isConstrained()) 5808 return T; 5809 return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false, 5810 AT->containsUnexpandedParameterPack()), 5811 T.getQualifiers()); 5812 } 5813 5814 // FIXME: We only support constrained auto at the top level in the type of a 5815 // non-type template parameter at the moment. Once we lift that restriction, 5816 // we'll need to recursively build types containing auto here. 5817 assert(!CanonT->getContainedAutoType() || 5818 !CanonT->getContainedAutoType()->isConstrained()); 5819 return T; 5820 } 5821 5822 /// Return the uniqued reference to the deduced template specialization type 5823 /// which has been deduced to the given type, or to the canonical undeduced 5824 /// such type, or the canonical deduced-but-dependent such type. 5825 QualType ASTContext::getDeducedTemplateSpecializationType( 5826 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5827 // Look in the folding set for an existing type. 5828 void *InsertPos = nullptr; 5829 llvm::FoldingSetNodeID ID; 5830 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5831 IsDependent); 5832 if (DeducedTemplateSpecializationType *DTST = 5833 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5834 return QualType(DTST, 0); 5835 5836 auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType)) 5837 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5838 llvm::FoldingSetNodeID TempID; 5839 DTST->Profile(TempID); 5840 assert(ID == TempID && "ID does not match"); 5841 Types.push_back(DTST); 5842 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5843 return QualType(DTST, 0); 5844 } 5845 5846 /// getAtomicType - Return the uniqued reference to the atomic type for 5847 /// the given value type. 5848 QualType ASTContext::getAtomicType(QualType T) const { 5849 // Unique pointers, to guarantee there is only one pointer of a particular 5850 // structure. 5851 llvm::FoldingSetNodeID ID; 5852 AtomicType::Profile(ID, T); 5853 5854 void *InsertPos = nullptr; 5855 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5856 return QualType(AT, 0); 5857 5858 // If the atomic value type isn't canonical, this won't be a canonical type 5859 // either, so fill in the canonical type field. 5860 QualType Canonical; 5861 if (!T.isCanonical()) { 5862 Canonical = getAtomicType(getCanonicalType(T)); 5863 5864 // Get the new insert position for the node we care about. 
5865 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5866 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5867 } 5868 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical); 5869 Types.push_back(New); 5870 AtomicTypes.InsertNode(New, InsertPos); 5871 return QualType(New, 0); 5872 } 5873 5874 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5875 QualType ASTContext::getAutoDeductType() const { 5876 if (AutoDeductTy.isNull()) 5877 AutoDeductTy = QualType(new (*this, alignof(AutoType)) 5878 AutoType(QualType(), AutoTypeKeyword::Auto, 5879 TypeDependence::None, QualType(), 5880 /*concept*/ nullptr, /*args*/ {}), 5881 0); 5882 return AutoDeductTy; 5883 } 5884 5885 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5886 QualType ASTContext::getAutoRRefDeductType() const { 5887 if (AutoRRefDeductTy.isNull()) 5888 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5889 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5890 return AutoRRefDeductTy; 5891 } 5892 5893 /// getTagDeclType - Return the unique reference to the type for the 5894 /// specified TagDecl (struct/union/class/enum) decl. 5895 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5896 assert(Decl); 5897 // FIXME: What is the design on getTagDeclType when it requires casting 5898 // away const? mutable? 5899 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5900 } 5901 5902 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5903 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5904 /// needs to agree with the definition in <stddef.h>. 5905 CanQualType ASTContext::getSizeType() const { 5906 return getFromTargetType(Target->getSizeType()); 5907 } 5908 5909 /// Return the unique signed counterpart of the integer type 5910 /// corresponding to size_t. 5911 CanQualType ASTContext::getSignedSizeType() const { 5912 return getFromTargetType(Target->getSignedSizeType()); 5913 } 5914 5915 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5916 CanQualType ASTContext::getIntMaxType() const { 5917 return getFromTargetType(Target->getIntMaxType()); 5918 } 5919 5920 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5921 CanQualType ASTContext::getUIntMaxType() const { 5922 return getFromTargetType(Target->getUIntMaxType()); 5923 } 5924 5925 /// getSignedWCharType - Return the type of "signed wchar_t". 5926 /// Used when in C++, as a GCC extension. 5927 QualType ASTContext::getSignedWCharType() const { 5928 // FIXME: derive from "Target" ? 5929 return WCharTy; 5930 } 5931 5932 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5933 /// Used when in C++, as a GCC extension. 5934 QualType ASTContext::getUnsignedWCharType() const { 5935 // FIXME: derive from "Target" ? 5936 return UnsignedIntTy; 5937 } 5938 5939 QualType ASTContext::getIntPtrType() const { 5940 return getFromTargetType(Target->getIntPtrType()); 5941 } 5942 5943 QualType ASTContext::getUIntPtrType() const { 5944 return getCorrespondingUnsignedType(getIntPtrType()); 5945 } 5946 5947 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5948 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 
5949 QualType ASTContext::getPointerDiffType() const { 5950 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 5951 } 5952 5953 /// Return the unique unsigned counterpart of "ptrdiff_t" 5954 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5955 /// in the definition of %tu format specifier. 5956 QualType ASTContext::getUnsignedPointerDiffType() const { 5957 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 5958 } 5959 5960 /// Return the unique type for "pid_t" defined in 5961 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5962 QualType ASTContext::getProcessIDType() const { 5963 return getFromTargetType(Target->getProcessIDType()); 5964 } 5965 5966 //===----------------------------------------------------------------------===// 5967 // Type Operators 5968 //===----------------------------------------------------------------------===// 5969 5970 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5971 // Push qualifiers into arrays, and then discard any remaining 5972 // qualifiers. 5973 T = getCanonicalType(T); 5974 T = getVariableArrayDecayedType(T); 5975 const Type *Ty = T.getTypePtr(); 5976 QualType Result; 5977 if (isa<ArrayType>(Ty)) { 5978 Result = getArrayDecayedType(QualType(Ty,0)); 5979 } else if (isa<FunctionType>(Ty)) { 5980 Result = getPointerType(QualType(Ty, 0)); 5981 } else { 5982 Result = QualType(Ty, 0); 5983 } 5984 5985 return CanQualType::CreateUnsafe(Result); 5986 } 5987 5988 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5989 Qualifiers &quals) { 5990 SplitQualType splitType = type.getSplitUnqualifiedType(); 5991 5992 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5993 // the unqualified desugared type and then drops it on the floor. 5994 // We then have to strip that sugar back off with 5995 // getUnqualifiedDesugaredType(), which is silly. 5996 const auto *AT = 5997 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5998 5999 // If we don't have an array, just use the results in splitType. 6000 if (!AT) { 6001 quals = splitType.Quals; 6002 return QualType(splitType.Ty, 0); 6003 } 6004 6005 // Otherwise, recurse on the array's element type. 6006 QualType elementType = AT->getElementType(); 6007 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 6008 6009 // If that didn't change the element type, AT has no qualifiers, so we 6010 // can just use the results in splitType. 6011 if (elementType == unqualElementType) { 6012 assert(quals.empty()); // from the recursive call 6013 quals = splitType.Quals; 6014 return QualType(splitType.Ty, 0); 6015 } 6016 6017 // Otherwise, add in the qualifiers from the outermost type, then 6018 // build the type back up. 
6019 quals.addConsistentQualifiers(splitType.Quals); 6020 6021 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 6022 return getConstantArrayType(unqualElementType, CAT->getSize(), 6023 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 6024 } 6025 6026 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 6027 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 6028 } 6029 6030 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 6031 return getVariableArrayType(unqualElementType, 6032 VAT->getSizeExpr(), 6033 VAT->getSizeModifier(), 6034 VAT->getIndexTypeCVRQualifiers(), 6035 VAT->getBracketsRange()); 6036 } 6037 6038 const auto *DSAT = cast<DependentSizedArrayType>(AT); 6039 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 6040 DSAT->getSizeModifier(), 0, 6041 SourceRange()); 6042 } 6043 6044 /// Attempt to unwrap two types that may both be array types with the same bound 6045 /// (or both be array types of unknown bound) for the purpose of comparing the 6046 /// cv-decomposition of two types per C++ [conv.qual]. 6047 /// 6048 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6049 /// C++20 [conv.qual], if permitted by the current language mode. 6050 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 6051 bool AllowPiMismatch) { 6052 while (true) { 6053 auto *AT1 = getAsArrayType(T1); 6054 if (!AT1) 6055 return; 6056 6057 auto *AT2 = getAsArrayType(T2); 6058 if (!AT2) 6059 return; 6060 6061 // If we don't have two array types with the same constant bound nor two 6062 // incomplete array types, we've unwrapped everything we can. 6063 // C++20 also permits one type to be a constant array type and the other 6064 // to be an incomplete array type. 6065 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6066 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6067 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6068 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6069 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6070 isa<IncompleteArrayType>(AT2)))) 6071 return; 6072 } else if (isa<IncompleteArrayType>(AT1)) { 6073 if (!(isa<IncompleteArrayType>(AT2) || 6074 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6075 isa<ConstantArrayType>(AT2)))) 6076 return; 6077 } else { 6078 return; 6079 } 6080 6081 T1 = AT1->getElementType(); 6082 T2 = AT2->getElementType(); 6083 } 6084 } 6085 6086 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 6087 /// 6088 /// If T1 and T2 are both pointer types of the same kind, or both array types 6089 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6090 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6091 /// 6092 /// This function will typically be called in a loop that successively 6093 /// "unwraps" pointer and pointer-to-member types to compare them at each 6094 /// level. 6095 /// 6096 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6097 /// C++20 [conv.qual], if permitted by the current language mode. 6098 /// 6099 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6100 /// pair of types that can't be unwrapped further. 
6101 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6102 bool AllowPiMismatch) { 6103 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6104 6105 const auto *T1PtrType = T1->getAs<PointerType>(); 6106 const auto *T2PtrType = T2->getAs<PointerType>(); 6107 if (T1PtrType && T2PtrType) { 6108 T1 = T1PtrType->getPointeeType(); 6109 T2 = T2PtrType->getPointeeType(); 6110 return true; 6111 } 6112 6113 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6114 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6115 if (T1MPType && T2MPType && 6116 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6117 QualType(T2MPType->getClass(), 0))) { 6118 T1 = T1MPType->getPointeeType(); 6119 T2 = T2MPType->getPointeeType(); 6120 return true; 6121 } 6122 6123 if (getLangOpts().ObjC) { 6124 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6125 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6126 if (T1OPType && T2OPType) { 6127 T1 = T1OPType->getPointeeType(); 6128 T2 = T2OPType->getPointeeType(); 6129 return true; 6130 } 6131 } 6132 6133 // FIXME: Block pointers, too? 6134 6135 return false; 6136 } 6137 6138 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6139 while (true) { 6140 Qualifiers Quals; 6141 T1 = getUnqualifiedArrayType(T1, Quals); 6142 T2 = getUnqualifiedArrayType(T2, Quals); 6143 if (hasSameType(T1, T2)) 6144 return true; 6145 if (!UnwrapSimilarTypes(T1, T2)) 6146 return false; 6147 } 6148 } 6149 6150 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6151 while (true) { 6152 Qualifiers Quals1, Quals2; 6153 T1 = getUnqualifiedArrayType(T1, Quals1); 6154 T2 = getUnqualifiedArrayType(T2, Quals2); 6155 6156 Quals1.removeCVRQualifiers(); 6157 Quals2.removeCVRQualifiers(); 6158 if (Quals1 != Quals2) 6159 return false; 6160 6161 if (hasSameType(T1, T2)) 6162 return true; 6163 6164 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6165 return false; 6166 } 6167 } 6168 6169 DeclarationNameInfo 6170 ASTContext::getNameForTemplate(TemplateName Name, 6171 SourceLocation NameLoc) const { 6172 switch (Name.getKind()) { 6173 case TemplateName::QualifiedTemplate: 6174 case TemplateName::Template: 6175 // DNInfo work in progress: CHECKME: what about DNLoc? 6176 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6177 NameLoc); 6178 6179 case TemplateName::OverloadedTemplate: { 6180 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6181 // DNInfo work in progress: CHECKME: what about DNLoc? 6182 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6183 } 6184 6185 case TemplateName::AssumedTemplate: { 6186 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6187 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6188 } 6189 6190 case TemplateName::DependentTemplate: { 6191 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6192 DeclarationName DName; 6193 if (DTN->isIdentifier()) { 6194 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6195 return DeclarationNameInfo(DName, NameLoc); 6196 } else { 6197 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6198 // DNInfo work in progress: FIXME: source locations? 
6199 DeclarationNameLoc DNLoc = 6200 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6201 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6202 } 6203 } 6204 6205 case TemplateName::SubstTemplateTemplateParm: { 6206 SubstTemplateTemplateParmStorage *subst 6207 = Name.getAsSubstTemplateTemplateParm(); 6208 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6209 NameLoc); 6210 } 6211 6212 case TemplateName::SubstTemplateTemplateParmPack: { 6213 SubstTemplateTemplateParmPackStorage *subst 6214 = Name.getAsSubstTemplateTemplateParmPack(); 6215 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6216 NameLoc); 6217 } 6218 case TemplateName::UsingTemplate: 6219 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6220 NameLoc); 6221 } 6222 6223 llvm_unreachable("bad template name kind!"); 6224 } 6225 6226 TemplateName 6227 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6228 switch (Name.getKind()) { 6229 case TemplateName::UsingTemplate: 6230 case TemplateName::QualifiedTemplate: 6231 case TemplateName::Template: { 6232 TemplateDecl *Template = Name.getAsTemplateDecl(); 6233 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6234 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6235 6236 // The canonical template name is the canonical template declaration. 6237 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6238 } 6239 6240 case TemplateName::OverloadedTemplate: 6241 case TemplateName::AssumedTemplate: 6242 llvm_unreachable("cannot canonicalize unresolved template"); 6243 6244 case TemplateName::DependentTemplate: { 6245 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6246 assert(DTN && "Non-dependent template names must refer to template decls."); 6247 return DTN->CanonicalTemplateName; 6248 } 6249 6250 case TemplateName::SubstTemplateTemplateParm: { 6251 SubstTemplateTemplateParmStorage *subst 6252 = Name.getAsSubstTemplateTemplateParm(); 6253 return getCanonicalTemplateName(subst->getReplacement()); 6254 } 6255 6256 case TemplateName::SubstTemplateTemplateParmPack: { 6257 SubstTemplateTemplateParmPackStorage *subst = 6258 Name.getAsSubstTemplateTemplateParmPack(); 6259 TemplateArgument canonArgPack = 6260 getCanonicalTemplateArgument(subst->getArgumentPack()); 6261 return getSubstTemplateTemplateParmPack( 6262 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(), 6263 subst->getFinal(), subst->getIndex()); 6264 } 6265 } 6266 6267 llvm_unreachable("bad template name!"); 6268 } 6269 6270 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6271 const TemplateName &Y) const { 6272 return getCanonicalTemplateName(X).getAsVoidPointer() == 6273 getCanonicalTemplateName(Y).getAsVoidPointer(); 6274 } 6275 6276 bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { 6277 if (!XCE != !YCE) 6278 return false; 6279 6280 if (!XCE) 6281 return true; 6282 6283 llvm::FoldingSetNodeID XCEID, YCEID; 6284 XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6285 YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6286 return XCEID == YCEID; 6287 } 6288 6289 bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, 6290 const TypeConstraint *YTC) const { 6291 if (!XTC != !YTC) 6292 return false; 6293 6294 if (!XTC) 6295 return true; 6296 6297 auto *NCX = XTC->getNamedConcept(); 6298 auto *NCY = YTC->getNamedConcept(); 6299 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6300 return 
false; 6301 if (XTC->getConceptReference()->hasExplicitTemplateArgs() != 6302 YTC->getConceptReference()->hasExplicitTemplateArgs()) 6303 return false; 6304 if (XTC->getConceptReference()->hasExplicitTemplateArgs()) 6305 if (XTC->getConceptReference() 6306 ->getTemplateArgsAsWritten() 6307 ->NumTemplateArgs != 6308 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs) 6309 return false; 6310 6311 // Compare slowly by profiling. 6312 // 6313 // We couldn't compare the profiling result for the template 6314 // args here. Consider the following example in different modules: 6315 // 6316 // template <__integer_like _Tp, C<_Tp> Sentinel> 6317 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const { 6318 // return __t; 6319 // } 6320 // 6321 // When we compare the profiling result for `C<_Tp>` in different 6322 // modules, it will compare the type of `_Tp` in different modules. 6323 // However, the type of `_Tp` in different modules refer to different 6324 // types here naturally. So we couldn't compare the profiling result 6325 // for the template args directly. 6326 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), 6327 YTC->getImmediatelyDeclaredConstraint()); 6328 } 6329 6330 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6331 const NamedDecl *Y) const { 6332 if (X->getKind() != Y->getKind()) 6333 return false; 6334 6335 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6336 auto *TY = cast<TemplateTypeParmDecl>(Y); 6337 if (TX->isParameterPack() != TY->isParameterPack()) 6338 return false; 6339 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6340 return false; 6341 return isSameTypeConstraint(TX->getTypeConstraint(), 6342 TY->getTypeConstraint()); 6343 } 6344 6345 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6346 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6347 return TX->isParameterPack() == TY->isParameterPack() && 6348 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) && 6349 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(), 6350 TY->getPlaceholderTypeConstraint()); 6351 } 6352 6353 auto *TX = cast<TemplateTemplateParmDecl>(X); 6354 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6355 return TX->isParameterPack() == TY->isParameterPack() && 6356 isSameTemplateParameterList(TX->getTemplateParameters(), 6357 TY->getTemplateParameters()); 6358 } 6359 6360 bool ASTContext::isSameTemplateParameterList( 6361 const TemplateParameterList *X, const TemplateParameterList *Y) const { 6362 if (X->size() != Y->size()) 6363 return false; 6364 6365 for (unsigned I = 0, N = X->size(); I != N; ++I) 6366 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6367 return false; 6368 6369 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); 6370 } 6371 6372 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, 6373 const NamedDecl *Y) const { 6374 // If the type parameter isn't the same already, we don't need to check the 6375 // default argument further. 
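// For example (illustrative): two modules both declaring
//   template <typename T = int> struct S;
// reach the checks below with matching parameters; the 'int' defaults are
// then compared by canonical type, while non-type and template template
// parameter defaults are compared by profiling and by template name,
// respectively.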
6376 if (!isSameTemplateParameter(X, Y)) 6377 return false; 6378 6379 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { 6380 auto *TTPY = cast<TemplateTypeParmDecl>(Y); 6381 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6382 return false; 6383 6384 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); 6385 } 6386 6387 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6388 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); 6389 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) 6390 return false; 6391 6392 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); 6393 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); 6394 llvm::FoldingSetNodeID XID, YID; 6395 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); 6396 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); 6397 return XID == YID; 6398 } 6399 6400 auto *TTPX = cast<TemplateTemplateParmDecl>(X); 6401 auto *TTPY = cast<TemplateTemplateParmDecl>(Y); 6402 6403 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6404 return false; 6405 6406 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); 6407 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); 6408 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); 6409 } 6410 6411 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6412 if (auto *NS = X->getAsNamespace()) 6413 return NS; 6414 if (auto *NAS = X->getAsNamespaceAlias()) 6415 return NAS->getNamespace(); 6416 return nullptr; 6417 } 6418 6419 static bool isSameQualifier(const NestedNameSpecifier *X, 6420 const NestedNameSpecifier *Y) { 6421 if (auto *NSX = getNamespace(X)) { 6422 auto *NSY = getNamespace(Y); 6423 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6424 return false; 6425 } else if (X->getKind() != Y->getKind()) 6426 return false; 6427 6428 // FIXME: For namespaces and types, we're permitted to check that the entity 6429 // is named via the same tokens. We should probably do so. 6430 switch (X->getKind()) { 6431 case NestedNameSpecifier::Identifier: 6432 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6433 return false; 6434 break; 6435 case NestedNameSpecifier::Namespace: 6436 case NestedNameSpecifier::NamespaceAlias: 6437 // We've already checked that we named the same namespace. 6438 break; 6439 case NestedNameSpecifier::TypeSpec: 6440 case NestedNameSpecifier::TypeSpecWithTemplate: 6441 if (X->getAsType()->getCanonicalTypeInternal() != 6442 Y->getAsType()->getCanonicalTypeInternal()) 6443 return false; 6444 break; 6445 case NestedNameSpecifier::Global: 6446 case NestedNameSpecifier::Super: 6447 return true; 6448 } 6449 6450 // Recurse into earlier portion of NNS, if any. 6451 auto *PX = X->getPrefix(); 6452 auto *PY = Y->getPrefix(); 6453 if (PX && PY) 6454 return isSameQualifier(PX, PY); 6455 return !PX && !PY; 6456 } 6457 6458 /// Determine whether the attributes we can overload on are identical for A and 6459 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6460 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6461 const FunctionDecl *B) { 6462 // Note that pass_object_size attributes are represented in the function's 6463 // ExtParameterInfo, so we don't need to check them here. 
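// Only enable_if conditions are compared below, pairwise and in declaration
// order. For example (illustrative), two declarations of
//   void f(int n) __attribute__((enable_if(n > 0, "")));
// match only if every corresponding condition profiles identically.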
6464 6465 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6466 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6467 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6468 6469 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6470 std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6471 std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6472 6473 // Return false if the number of enable_if attributes is different. 6474 if (!Cand1A || !Cand2A) 6475 return false; 6476 6477 Cand1ID.clear(); 6478 Cand2ID.clear(); 6479 6480 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6481 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6482 6483 // Return false if any of the enable_if expressions of A and B are 6484 // different. 6485 if (Cand1ID != Cand2ID) 6486 return false; 6487 } 6488 return true; 6489 } 6490 6491 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { 6492 // Caution: this function is called by the AST reader during deserialization, 6493 // so it cannot rely on AST invariants being met. Non-trivial accessors 6494 // should be avoided, along with any traversal of redeclaration chains. 6495 6496 if (X == Y) 6497 return true; 6498 6499 if (X->getDeclName() != Y->getDeclName()) 6500 return false; 6501 6502 // Must be in the same context. 6503 // 6504 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6505 // could be two different declarations of the same function. (We will fix the 6506 // semantic DC to refer to the primary definition after merging.) 6507 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6508 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6509 return false; 6510 6511 // Two typedefs refer to the same entity if they have the same underlying 6512 // type. 6513 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6514 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6515 return hasSameType(TypedefX->getUnderlyingType(), 6516 TypedefY->getUnderlyingType()); 6517 6518 // Must have the same kind. 6519 if (X->getKind() != Y->getKind()) 6520 return false; 6521 6522 // Objective-C classes and protocols with the same name always match. 6523 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6524 return true; 6525 6526 if (isa<ClassTemplateSpecializationDecl>(X)) { 6527 // No need to handle these here: we merge them when adding them to the 6528 // template. 6529 return false; 6530 } 6531 6532 // Compatible tags match. 6533 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6534 const auto *TagY = cast<TagDecl>(Y); 6535 return (TagX->getTagKind() == TagY->getTagKind()) || 6536 ((TagX->getTagKind() == TagTypeKind::Struct || 6537 TagX->getTagKind() == TagTypeKind::Class || 6538 TagX->getTagKind() == TagTypeKind::Interface) && 6539 (TagY->getTagKind() == TagTypeKind::Struct || 6540 TagY->getTagKind() == TagTypeKind::Class || 6541 TagY->getTagKind() == TagTypeKind::Interface)); 6542 } 6543 6544 // Functions with the same type and linkage match. 6545 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6546 // functions, etc. 
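// For example (illustrative), 'void f(int);' declared in two modules with
// the same written type and linkage is treated as one entity by the checks
// below, provided its constraints and enable_if attributes also match.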
6547 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) {
6548 const auto *FuncY = cast<FunctionDecl>(Y);
6549 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) {
6550 const auto *CtorY = cast<CXXConstructorDecl>(Y);
6551 if (CtorX->getInheritedConstructor() &&
6552 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(),
6553 CtorY->getInheritedConstructor().getConstructor()))
6554 return false;
6555 }
6556
6557 if (FuncX->isMultiVersion() != FuncY->isMultiVersion())
6558 return false;
6559
6560 // Multiversioned functions with different feature strings are represented
6561 // as separate declarations.
6562 if (FuncX->isMultiVersion()) {
6563 const auto *TAX = FuncX->getAttr<TargetAttr>();
6564 const auto *TAY = FuncY->getAttr<TargetAttr>();
6565 assert(TAX && TAY && "Multiversion Function without target attribute");
6566
6567 if (TAX->getFeaturesStr() != TAY->getFeaturesStr())
6568 return false;
6569 }
6570
6571 // Per C++20 [temp.over.link]/4, friends in different classes are sometimes
6572 // not the same entity if they are constrained.
6573 if ((FuncX->isMemberLikeConstrainedFriend() ||
6574 FuncY->isMemberLikeConstrainedFriend()) &&
6575 !FuncX->getLexicalDeclContext()->Equals(
6576 FuncY->getLexicalDeclContext())) {
6577 return false;
6578 }
6579
6580 if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(),
6581 FuncY->getTrailingRequiresClause()))
6582 return false;
6583
6584 auto GetTypeAsWritten = [](const FunctionDecl *FD) {
6585 // Map to the first declaration that we've already merged into this one.
6586 // The TSI of redeclarations might not match (due to calling conventions
6587 // being inherited onto the type but not the TSI), but the TSI type of
6588 // the first declaration of the function should match across modules.
6589 FD = FD->getCanonicalDecl();
6590 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType()
6591 : FD->getType();
6592 };
6593 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY);
6594 if (!hasSameType(XT, YT)) {
6595 // We can get functions with different types on the redecl chain in C++17
6596 // if they have differing exception specifications and at least one of
6597 // the exception specs is unresolved.
6598 auto *XFPT = XT->getAs<FunctionProtoType>();
6599 auto *YFPT = YT->getAs<FunctionProtoType>();
6600 if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
6601 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
6602 isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
6603 hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
6604 return true;
6605 return false;
6606 }
6607
6608 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() &&
6609 hasSameOverloadableAttrs(FuncX, FuncY);
6610 }
6611
6612 // Variables with the same type and linkage match.
6613 if (const auto *VarX = dyn_cast<VarDecl>(X)) {
6614 const auto *VarY = cast<VarDecl>(Y);
6615 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) {
6616 // During deserialization, we might compare variables before we load
6617 // their types. Assume the types will end up being the same.
6618 if (VarX->getType().isNull() || VarY->getType().isNull())
6619 return true;
6620
6621 if (hasSameType(VarX->getType(), VarY->getType()))
6622 return true;
6623
6624 // We can get decls with different types on the redecl chain. E.g.
6625 // template <typename T> struct S { static T Var[]; }; // #1
6626 // template <typename T> T S<T>::Var[sizeof(T)]; // #2
6627 // This only happens when completing an incomplete array type.
In this case 6628 // when comparing #1 and #2 we should go through their element type. 6629 const ArrayType *VarXTy = getAsArrayType(VarX->getType()); 6630 const ArrayType *VarYTy = getAsArrayType(VarY->getType()); 6631 if (!VarXTy || !VarYTy) 6632 return false; 6633 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType()) 6634 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType()); 6635 } 6636 return false; 6637 } 6638 6639 // Namespaces with the same name and inlinedness match. 6640 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) { 6641 const auto *NamespaceY = cast<NamespaceDecl>(Y); 6642 return NamespaceX->isInline() == NamespaceY->isInline(); 6643 } 6644 6645 // Identical template names and kinds match if their template parameter lists 6646 // and patterns match. 6647 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) { 6648 const auto *TemplateY = cast<TemplateDecl>(Y); 6649 6650 // ConceptDecl wouldn't be the same if their constraint expression differs. 6651 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) { 6652 const auto *ConceptY = cast<ConceptDecl>(Y); 6653 if (!isSameConstraintExpr(ConceptX->getConstraintExpr(), 6654 ConceptY->getConstraintExpr())) 6655 return false; 6656 } 6657 6658 return isSameEntity(TemplateX->getTemplatedDecl(), 6659 TemplateY->getTemplatedDecl()) && 6660 isSameTemplateParameterList(TemplateX->getTemplateParameters(), 6661 TemplateY->getTemplateParameters()); 6662 } 6663 6664 // Fields with the same name and the same type match. 6665 if (const auto *FDX = dyn_cast<FieldDecl>(X)) { 6666 const auto *FDY = cast<FieldDecl>(Y); 6667 // FIXME: Also check the bitwidth is odr-equivalent, if any. 6668 return hasSameType(FDX->getType(), FDY->getType()); 6669 } 6670 6671 // Indirect fields with the same target field match. 6672 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6673 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6674 return IFDX->getAnonField()->getCanonicalDecl() == 6675 IFDY->getAnonField()->getCanonicalDecl(); 6676 } 6677 6678 // Enumerators with the same name match. 6679 if (isa<EnumConstantDecl>(X)) 6680 // FIXME: Also check the value is odr-equivalent. 6681 return true; 6682 6683 // Using shadow declarations with the same target match. 6684 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6685 const auto *USY = cast<UsingShadowDecl>(Y); 6686 return USX->getTargetDecl() == USY->getTargetDecl(); 6687 } 6688 6689 // Using declarations with the same qualifier match. (We already know that 6690 // the name matches.) 6691 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6692 const auto *UY = cast<UsingDecl>(Y); 6693 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6694 UX->hasTypename() == UY->hasTypename() && 6695 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6696 } 6697 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6698 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6699 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6700 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6701 } 6702 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6703 return isSameQualifier( 6704 UX->getQualifier(), 6705 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6706 } 6707 6708 // Using-pack declarations are only created by instantiation, and match if 6709 // they're instantiated from matching UnresolvedUsing...Decls. 
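// For example (illustrative), a pack-expanded using declaration such as
//   using T::operator()...;
// in a class template produces a UsingPackDecl in each instantiation; two
// such declarations match when they were instantiated from matching
// declarations.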
6710 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6711 return declaresSameEntity( 6712 UX->getInstantiatedFromUsingDecl(), 6713 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6714 } 6715 6716 // Namespace alias definitions with the same target match. 6717 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) { 6718 const auto *NAY = cast<NamespaceAliasDecl>(Y); 6719 return NAX->getNamespace()->Equals(NAY->getNamespace()); 6720 } 6721 6722 return false; 6723 } 6724 6725 TemplateArgument 6726 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6727 switch (Arg.getKind()) { 6728 case TemplateArgument::Null: 6729 return Arg; 6730 6731 case TemplateArgument::Expression: 6732 return Arg; 6733 6734 case TemplateArgument::Declaration: { 6735 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6736 return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()), 6737 Arg.getIsDefaulted()); 6738 } 6739 6740 case TemplateArgument::NullPtr: 6741 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6742 /*isNullPtr*/ true, Arg.getIsDefaulted()); 6743 6744 case TemplateArgument::Template: 6745 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()), 6746 Arg.getIsDefaulted()); 6747 6748 case TemplateArgument::TemplateExpansion: 6749 return TemplateArgument( 6750 getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()), 6751 Arg.getNumTemplateExpansions(), Arg.getIsDefaulted()); 6752 6753 case TemplateArgument::Integral: 6754 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6755 6756 case TemplateArgument::Type: 6757 return TemplateArgument(getCanonicalType(Arg.getAsType()), 6758 /*isNullPtr*/ false, Arg.getIsDefaulted()); 6759 6760 case TemplateArgument::Pack: { 6761 bool AnyNonCanonArgs = false; 6762 auto CanonArgs = ::getCanonicalTemplateArguments( 6763 *this, Arg.pack_elements(), AnyNonCanonArgs); 6764 if (!AnyNonCanonArgs) 6765 return Arg; 6766 return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), 6767 CanonArgs); 6768 } 6769 } 6770 6771 // Silence GCC warning 6772 llvm_unreachable("Unhandled template argument kind"); 6773 } 6774 6775 NestedNameSpecifier * 6776 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6777 if (!NNS) 6778 return nullptr; 6779 6780 switch (NNS->getKind()) { 6781 case NestedNameSpecifier::Identifier: 6782 // Canonicalize the prefix but keep the identifier the same. 6783 return NestedNameSpecifier::Create(*this, 6784 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6785 NNS->getAsIdentifier()); 6786 6787 case NestedNameSpecifier::Namespace: 6788 // A namespace is canonical; build a nested-name-specifier with 6789 // this namespace and no prefix. 6790 return NestedNameSpecifier::Create(*this, nullptr, 6791 NNS->getAsNamespace()->getOriginalNamespace()); 6792 6793 case NestedNameSpecifier::NamespaceAlias: 6794 // A namespace is canonical; build a nested-name-specifier with 6795 // this namespace and no prefix. 6796 return NestedNameSpecifier::Create(*this, nullptr, 6797 NNS->getAsNamespaceAlias()->getNamespace() 6798 ->getOriginalNamespace()); 6799 6800 // The difference between TypeSpec and TypeSpecWithTemplate is that the 6801 // latter will have the 'template' keyword when printed. 
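// For example (illustrative), 'typename T::type::' is a TypeSpec specifier,
// while 'typename T::template apply<U>::' is a TypeSpecWithTemplate
// specifier and prints the 'template' keyword.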
6802 case NestedNameSpecifier::TypeSpec:
6803 case NestedNameSpecifier::TypeSpecWithTemplate: {
6804 const Type *T = getCanonicalType(NNS->getAsType());
6805
6806 // If we have some kind of dependent-named type (e.g., "typename T::type"),
6807 // break it apart into its prefix and identifier, then reconstitute those
6808 // as the canonical nested-name-specifier. This is required to canonicalize
6809 // a dependent nested-name-specifier involving typedefs of dependent-name
6810 // types, e.g.,
6811 // typedef typename T::type T1;
6812 // typedef typename T1::type T2;
6813 if (const auto *DNT = T->getAs<DependentNameType>())
6814 return NestedNameSpecifier::Create(
6815 *this, DNT->getQualifier(),
6816 const_cast<IdentifierInfo *>(DNT->getIdentifier()));
6817 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
6818 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
6819 const_cast<Type *>(T));
6820
6821 // TODO: Set 'Template' parameter to true for other template types.
6822 return NestedNameSpecifier::Create(*this, nullptr, false,
6823 const_cast<Type *>(T));
6824 }
6825
6826 case NestedNameSpecifier::Global:
6827 case NestedNameSpecifier::Super:
6828 // The global specifier and __super specifier are canonical and unique.
6829 return NNS;
6830 }
6831
6832 llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
6833 }
6834
6835 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
6836 // Handle the non-qualified case efficiently.
6837 if (!T.hasLocalQualifiers()) {
6838 // Handle the common positive case fast.
6839 if (const auto *AT = dyn_cast<ArrayType>(T))
6840 return AT;
6841 }
6842
6843 // Handle the common negative case fast.
6844 if (!isa<ArrayType>(T.getCanonicalType()))
6845 return nullptr;
6846
6847 // Apply any qualifiers from the array type to the element type. This
6848 // implements C99 6.7.3p8: "If the specification of an array type includes
6849 // any type qualifiers, the element type is so qualified, not the array type."
6850
6851 // If we get here, we either have type qualifiers on the type, or we have
6852 // sugar such as a typedef in the way. If we have type qualifiers on the type
6853 // we must propagate them down into the element type.
6854
6855 SplitQualType split = T.getSplitDesugaredType();
6856 Qualifiers qs = split.Quals;
6857
6858 // If we have a simple case, just return now.
6859 const auto *ATy = dyn_cast<ArrayType>(split.Ty);
6860 if (!ATy || qs.empty())
6861 return ATy;
6862
6863 // Otherwise, we have an array and we have qualifiers on it. Push the
6864 // qualifiers into the array element type and return a new array type.
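// For example (illustrative):
//   typedef int A[5];
//   const A x;
// getAsArrayType on the type of 'x' returns 'const int[5]': the 'const' is
// pushed onto the element type per C99 6.7.3p8.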
6865 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6866 6867 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6868 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6869 CAT->getSizeExpr(), 6870 CAT->getSizeModifier(), 6871 CAT->getIndexTypeCVRQualifiers())); 6872 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6873 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6874 IAT->getSizeModifier(), 6875 IAT->getIndexTypeCVRQualifiers())); 6876 6877 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6878 return cast<ArrayType>( 6879 getDependentSizedArrayType(NewEltTy, 6880 DSAT->getSizeExpr(), 6881 DSAT->getSizeModifier(), 6882 DSAT->getIndexTypeCVRQualifiers(), 6883 DSAT->getBracketsRange())); 6884 6885 const auto *VAT = cast<VariableArrayType>(ATy); 6886 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6887 VAT->getSizeExpr(), 6888 VAT->getSizeModifier(), 6889 VAT->getIndexTypeCVRQualifiers(), 6890 VAT->getBracketsRange())); 6891 } 6892 6893 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6894 if (T->isArrayType() || T->isFunctionType()) 6895 return getDecayedType(T); 6896 return T; 6897 } 6898 6899 QualType ASTContext::getSignatureParameterType(QualType T) const { 6900 T = getVariableArrayDecayedType(T); 6901 T = getAdjustedParameterType(T); 6902 return T.getUnqualifiedType(); 6903 } 6904 6905 QualType ASTContext::getExceptionObjectType(QualType T) const { 6906 // C++ [except.throw]p3: 6907 // A throw-expression initializes a temporary object, called the exception 6908 // object, the type of which is determined by removing any top-level 6909 // cv-qualifiers from the static type of the operand of throw and adjusting 6910 // the type from "array of T" or "function returning T" to "pointer to T" 6911 // or "pointer to function returning T", [...] 6912 T = getVariableArrayDecayedType(T); 6913 if (T->isArrayType() || T->isFunctionType()) 6914 T = getDecayedType(T); 6915 return T.getUnqualifiedType(); 6916 } 6917 6918 /// getArrayDecayedType - Return the properly qualified result of decaying the 6919 /// specified array type to a pointer. This operation is non-trivial when 6920 /// handling typedefs etc. The canonical type of "T" must be an array type, 6921 /// this returns a pointer to a properly qualified element of the array. 6922 /// 6923 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6924 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6925 // Get the element type with 'getAsArrayType' so that we don't lose any 6926 // typedefs in the element type of the array. This also handles propagation 6927 // of type qualifiers from the array type into the element type if present 6928 // (C99 6.7.3p8). 
6929 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6930 assert(PrettyArrayType && "Not an array type!"); 6931 6932 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6933 6934 // int x[restrict 4] -> int *restrict 6935 QualType Result = getQualifiedType(PtrTy, 6936 PrettyArrayType->getIndexTypeQualifiers()); 6937 6938 // int x[_Nullable] -> int * _Nullable 6939 if (auto Nullability = Ty->getNullability()) { 6940 Result = const_cast<ASTContext *>(this)->getAttributedType( 6941 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6942 } 6943 return Result; 6944 } 6945 6946 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6947 return getBaseElementType(array->getElementType()); 6948 } 6949 6950 QualType ASTContext::getBaseElementType(QualType type) const { 6951 Qualifiers qs; 6952 while (true) { 6953 SplitQualType split = type.getSplitDesugaredType(); 6954 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6955 if (!array) break; 6956 6957 type = array->getElementType(); 6958 qs.addConsistentQualifiers(split.Quals); 6959 } 6960 6961 return getQualifiedType(type, qs); 6962 } 6963 6964 /// getConstantArrayElementCount - Returns number of constant array elements. 6965 uint64_t 6966 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6967 uint64_t ElementCount = 1; 6968 do { 6969 ElementCount *= CA->getSize().getZExtValue(); 6970 CA = dyn_cast_or_null<ConstantArrayType>( 6971 CA->getElementType()->getAsArrayTypeUnsafe()); 6972 } while (CA); 6973 return ElementCount; 6974 } 6975 6976 uint64_t ASTContext::getArrayInitLoopExprElementCount( 6977 const ArrayInitLoopExpr *AILE) const { 6978 if (!AILE) 6979 return 0; 6980 6981 uint64_t ElementCount = 1; 6982 6983 do { 6984 ElementCount *= AILE->getArraySize().getZExtValue(); 6985 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr()); 6986 } while (AILE); 6987 6988 return ElementCount; 6989 } 6990 6991 /// getFloatingRank - Return a relative rank for floating point types. 6992 /// This routine will assert if passed a built-in type that isn't a float. 6993 static FloatingRank getFloatingRank(QualType T) { 6994 if (const auto *CT = T->getAs<ComplexType>()) 6995 return getFloatingRank(CT->getElementType()); 6996 6997 switch (T->castAs<BuiltinType>()->getKind()) { 6998 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6999 case BuiltinType::Float16: return Float16Rank; 7000 case BuiltinType::Half: return HalfRank; 7001 case BuiltinType::Float: return FloatRank; 7002 case BuiltinType::Double: return DoubleRank; 7003 case BuiltinType::LongDouble: return LongDoubleRank; 7004 case BuiltinType::Float128: return Float128Rank; 7005 case BuiltinType::BFloat16: return BFloat16Rank; 7006 case BuiltinType::Ibm128: return Ibm128Rank; 7007 } 7008 } 7009 7010 /// getFloatingTypeOrder - Compare the rank of the two specified floating 7011 /// point types, ignoring the domain of the type (i.e. 'double' == 7012 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 7013 /// LHS < RHS, return -1. 
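/// For example (illustrative), getFloatingTypeOrder(FloatTy, DoubleTy)
/// returns -1, and comparing 'double' with '_Complex double' returns 0,
/// since complex types are ranked by their element type.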
7014 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 7015 FloatingRank LHSR = getFloatingRank(LHS); 7016 FloatingRank RHSR = getFloatingRank(RHS); 7017 7018 if (LHSR == RHSR) 7019 return 0; 7020 if (LHSR > RHSR) 7021 return 1; 7022 return -1; 7023 } 7024 7025 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 7026 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 7027 return 0; 7028 return getFloatingTypeOrder(LHS, RHS); 7029 } 7030 7031 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 7032 /// routine will assert if passed a built-in type that isn't an integer or enum, 7033 /// or if it is not canonicalized. 7034 unsigned ASTContext::getIntegerRank(const Type *T) const { 7035 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 7036 7037 // Results in this 'losing' to any type of the same size, but winning if 7038 // larger. 7039 if (const auto *EIT = dyn_cast<BitIntType>(T)) 7040 return 0 + (EIT->getNumBits() << 3); 7041 7042 switch (cast<BuiltinType>(T)->getKind()) { 7043 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 7044 case BuiltinType::Bool: 7045 return 1 + (getIntWidth(BoolTy) << 3); 7046 case BuiltinType::Char_S: 7047 case BuiltinType::Char_U: 7048 case BuiltinType::SChar: 7049 case BuiltinType::UChar: 7050 return 2 + (getIntWidth(CharTy) << 3); 7051 case BuiltinType::Short: 7052 case BuiltinType::UShort: 7053 return 3 + (getIntWidth(ShortTy) << 3); 7054 case BuiltinType::Int: 7055 case BuiltinType::UInt: 7056 return 4 + (getIntWidth(IntTy) << 3); 7057 case BuiltinType::Long: 7058 case BuiltinType::ULong: 7059 return 5 + (getIntWidth(LongTy) << 3); 7060 case BuiltinType::LongLong: 7061 case BuiltinType::ULongLong: 7062 return 6 + (getIntWidth(LongLongTy) << 3); 7063 case BuiltinType::Int128: 7064 case BuiltinType::UInt128: 7065 return 7 + (getIntWidth(Int128Ty) << 3); 7066 7067 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of 7068 // their underlying types" [c++20 conv.rank] 7069 case BuiltinType::Char8: 7070 return getIntegerRank(UnsignedCharTy.getTypePtr()); 7071 case BuiltinType::Char16: 7072 return getIntegerRank( 7073 getFromTargetType(Target->getChar16Type()).getTypePtr()); 7074 case BuiltinType::Char32: 7075 return getIntegerRank( 7076 getFromTargetType(Target->getChar32Type()).getTypePtr()); 7077 case BuiltinType::WChar_S: 7078 case BuiltinType::WChar_U: 7079 return getIntegerRank( 7080 getFromTargetType(Target->getWCharType()).getTypePtr()); 7081 } 7082 } 7083 7084 /// Whether this is a promotable bitfield reference according 7085 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 7086 /// 7087 /// \returns the type this bit-field will promote to, or NULL if no 7088 /// promotion occurs. 7089 QualType ASTContext::isPromotableBitField(Expr *E) const { 7090 if (E->isTypeDependent() || E->isValueDependent()) 7091 return {}; 7092 7093 // C++ [conv.prom]p5: 7094 // If the bit-field has an enumerated type, it is treated as any other 7095 // value of that type for promotion purposes. 7096 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 7097 return {}; 7098 7099 // FIXME: We should not do this unless E->refersToBitField() is true. This 7100 // matters in C where getSourceBitField() will find bit-fields for various 7101 // cases where the source expression is not a bit-field designator. 7102 7103 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 
7104 if (!Field) 7105 return {}; 7106 7107 QualType FT = Field->getType(); 7108 7109 uint64_t BitWidth = Field->getBitWidthValue(*this); 7110 uint64_t IntSize = getTypeSize(IntTy); 7111 // C++ [conv.prom]p5: 7112 // A prvalue for an integral bit-field can be converted to a prvalue of type 7113 // int if int can represent all the values of the bit-field; otherwise, it 7114 // can be converted to unsigned int if unsigned int can represent all the 7115 // values of the bit-field. If the bit-field is larger yet, no integral 7116 // promotion applies to it. 7117 // C11 6.3.1.1/2: 7118 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 7119 // If an int can represent all values of the original type (as restricted by 7120 // the width, for a bit-field), the value is converted to an int; otherwise, 7121 // it is converted to an unsigned int. 7122 // 7123 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 7124 // We perform that promotion here to match GCC and C++. 7125 // FIXME: C does not permit promotion of an enum bit-field whose rank is 7126 // greater than that of 'int'. We perform that promotion to match GCC. 7127 if (BitWidth < IntSize) 7128 return IntTy; 7129 7130 if (BitWidth == IntSize) 7131 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 7132 7133 // Bit-fields wider than int are not subject to promotions, and therefore act 7134 // like the base type. GCC has some weird bugs in this area that we 7135 // deliberately do not follow (GCC follows a pre-standard resolution to 7136 // C's DR315 which treats bit-width as being part of the type, and this leaks 7137 // into their semantics in some cases). 7138 return {}; 7139 } 7140 7141 /// getPromotedIntegerType - Returns the type that Promotable will 7142 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 7143 /// integer type. 7144 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7145 assert(!Promotable.isNull()); 7146 assert(isPromotableIntegerType(Promotable)); 7147 if (const auto *ET = Promotable->getAs<EnumType>()) 7148 return ET->getDecl()->getPromotionType(); 7149 7150 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7151 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7152 // (3.9.1) can be converted to a prvalue of the first of the following 7153 // types that can represent all the values of its underlying type: 7154 // int, unsigned int, long int, unsigned long int, long long int, or 7155 // unsigned long long int [...] 7156 // FIXME: Is there some better way to compute this? 7157 if (BT->getKind() == BuiltinType::WChar_S || 7158 BT->getKind() == BuiltinType::WChar_U || 7159 BT->getKind() == BuiltinType::Char8 || 7160 BT->getKind() == BuiltinType::Char16 || 7161 BT->getKind() == BuiltinType::Char32) { 7162 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7163 uint64_t FromSize = getTypeSize(BT); 7164 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7165 LongLongTy, UnsignedLongLongTy }; 7166 for (const auto &PT : PromoteTypes) { 7167 uint64_t ToSize = getTypeSize(PT); 7168 if (FromSize < ToSize || 7169 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) 7170 return PT; 7171 } 7172 llvm_unreachable("char type should fit into long long"); 7173 } 7174 } 7175 7176 // At this point, we should have a signed or unsigned integer type. 
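// For example (illustrative), 'short' and 'signed char' promote to 'int'
// here; 'unsigned short' promotes to 'int' when int is wider than short,
// and to 'unsigned int' when the two have the same width.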
7177 if (Promotable->isSignedIntegerType()) 7178 return IntTy; 7179 uint64_t PromotableSize = getIntWidth(Promotable); 7180 uint64_t IntSize = getIntWidth(IntTy); 7181 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 7182 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 7183 } 7184 7185 /// Recurses in pointer/array types until it finds an objc retainable 7186 /// type and returns its ownership. 7187 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 7188 while (!T.isNull()) { 7189 if (T.getObjCLifetime() != Qualifiers::OCL_None) 7190 return T.getObjCLifetime(); 7191 if (T->isArrayType()) 7192 T = getBaseElementType(T); 7193 else if (const auto *PT = T->getAs<PointerType>()) 7194 T = PT->getPointeeType(); 7195 else if (const auto *RT = T->getAs<ReferenceType>()) 7196 T = RT->getPointeeType(); 7197 else 7198 break; 7199 } 7200 7201 return Qualifiers::OCL_None; 7202 } 7203 7204 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 7205 // Incomplete enum types are not treated as integer types. 7206 // FIXME: In C++, enum types are never integer types. 7207 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 7208 return ET->getDecl()->getIntegerType().getTypePtr(); 7209 return nullptr; 7210 } 7211 7212 /// getIntegerTypeOrder - Returns the highest ranked integer type: 7213 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 7214 /// LHS < RHS, return -1. 7215 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 7216 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 7217 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 7218 7219 // Unwrap enums to their underlying type. 7220 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 7221 LHSC = getIntegerTypeForEnum(ET); 7222 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 7223 RHSC = getIntegerTypeForEnum(ET); 7224 7225 if (LHSC == RHSC) return 0; 7226 7227 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 7228 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 7229 7230 unsigned LHSRank = getIntegerRank(LHSC); 7231 unsigned RHSRank = getIntegerRank(RHSC); 7232 7233 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 7234 if (LHSRank == RHSRank) return 0; 7235 return LHSRank > RHSRank ? 1 : -1; 7236 } 7237 7238 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 7239 if (LHSUnsigned) { 7240 // If the unsigned [LHS] type is larger, return it. 7241 if (LHSRank >= RHSRank) 7242 return 1; 7243 7244 // If the signed type can represent all values of the unsigned type, it 7245 // wins. Because we are dealing with 2's complement and types that are 7246 // powers of two larger than each other, this is always safe. 7247 return -1; 7248 } 7249 7250 // If the unsigned [RHS] type is larger, return it. 7251 if (RHSRank >= LHSRank) 7252 return -1; 7253 7254 // If the signed type can represent all values of the unsigned type, it 7255 // wins. Because we are dealing with 2's complement and types that are 7256 // powers of two larger than each other, this is always safe. 
7257 return 1;
7258 }
7259
7260 TypedefDecl *ASTContext::getCFConstantStringDecl() const {
7261 if (CFConstantStringTypeDecl)
7262 return CFConstantStringTypeDecl;
7263
7264 assert(!CFConstantStringTagDecl &&
7265 "tag and typedef should be initialized together");
7266 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag");
7267 CFConstantStringTagDecl->startDefinition();
7268
7269 struct {
7270 QualType Type;
7271 const char *Name;
7272 } Fields[5];
7273 unsigned Count = 0;
7274
7275 /// Objective-C ABI
7276 ///
7277 /// typedef struct __NSConstantString_tag {
7278 /// const int *isa;
7279 /// int flags;
7280 /// const char *str;
7281 /// long length;
7282 /// } __NSConstantString;
7283 ///
7284 /// Swift ABI (4.1, 4.2)
7285 ///
7286 /// typedef struct __NSConstantString_tag {
7287 /// uintptr_t _cfisa;
7288 /// uintptr_t _swift_rc;
7289 /// _Atomic(uint64_t) _cfinfoa;
7290 /// const char *_ptr;
7291 /// uint32_t _length;
7292 /// } __NSConstantString;
7293 ///
7294 /// Swift ABI (5.0)
7295 ///
7296 /// typedef struct __NSConstantString_tag {
7297 /// uintptr_t _cfisa;
7298 /// uintptr_t _swift_rc;
7299 /// _Atomic(uint64_t) _cfinfoa;
7300 /// const char *_ptr;
7301 /// uintptr_t _length;
7302 /// } __NSConstantString;
7303
7304 const auto CFRuntime = getLangOpts().CFRuntime;
7305 if (static_cast<unsigned>(CFRuntime) <
7306 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) {
7307 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" };
7308 Fields[Count++] = { IntTy, "flags" };
7309 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" };
7310 Fields[Count++] = { LongTy, "length" };
7311 } else {
7312 Fields[Count++] = { getUIntPtrType(), "_cfisa" };
7313 Fields[Count++] = { getUIntPtrType(), "_swift_rc" };
7314 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_cfinfoa" };
7315 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" };
7316 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
7317 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
7318 Fields[Count++] = { IntTy, "_length" };
7319 else
7320 Fields[Count++] = { getUIntPtrType(), "_length" };
7321 }
7322
7323 // Create fields
7324 for (unsigned i = 0; i < Count; ++i) {
7325 FieldDecl *Field =
7326 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(),
7327 SourceLocation(), &Idents.get(Fields[i].Name),
7328 Fields[i].Type, /*TInfo=*/nullptr,
7329 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit);
7330 Field->setAccess(AS_public);
7331 CFConstantStringTagDecl->addDecl(Field);
7332 }
7333
7334 CFConstantStringTagDecl->completeDefinition();
7335 // This type is designed to be compatible with NSConstantString, but cannot
7336 // use the same name, since NSConstantString is an interface.
7337 auto tagType = getTagDeclType(CFConstantStringTagDecl);
7338 CFConstantStringTypeDecl =
7339 buildImplicitTypedef(tagType, "__NSConstantString");
7340
7341 return CFConstantStringTypeDecl;
7342 }
7343
7344 RecordDecl *ASTContext::getCFConstantStringTagDecl() const {
7345 if (!CFConstantStringTagDecl)
7346 getCFConstantStringDecl(); // Build the tag and the typedef.
7347 return CFConstantStringTagDecl;
7348 }
7349
7350 // getCFConstantStringType - Return the type used for constant CFStrings.
7351 QualType ASTContext::getCFConstantStringType() const { 7352 return getTypedefType(getCFConstantStringDecl()); 7353 } 7354 7355 QualType ASTContext::getObjCSuperType() const { 7356 if (ObjCSuperType.isNull()) { 7357 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7358 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7359 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7360 } 7361 return ObjCSuperType; 7362 } 7363 7364 void ASTContext::setCFConstantStringType(QualType T) { 7365 const auto *TD = T->castAs<TypedefType>(); 7366 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7367 const auto *TagType = 7368 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7369 CFConstantStringTagDecl = TagType->getDecl(); 7370 } 7371 7372 QualType ASTContext::getBlockDescriptorType() const { 7373 if (BlockDescriptorType) 7374 return getTagDeclType(BlockDescriptorType); 7375 7376 RecordDecl *RD; 7377 // FIXME: Needs the FlagAppleBlock bit. 7378 RD = buildImplicitRecord("__block_descriptor"); 7379 RD->startDefinition(); 7380 7381 QualType FieldTypes[] = { 7382 UnsignedLongTy, 7383 UnsignedLongTy, 7384 }; 7385 7386 static const char *const FieldNames[] = { 7387 "reserved", 7388 "Size" 7389 }; 7390 7391 for (size_t i = 0; i < 2; ++i) { 7392 FieldDecl *Field = FieldDecl::Create( 7393 *this, RD, SourceLocation(), SourceLocation(), 7394 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7395 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7396 Field->setAccess(AS_public); 7397 RD->addDecl(Field); 7398 } 7399 7400 RD->completeDefinition(); 7401 7402 BlockDescriptorType = RD; 7403 7404 return getTagDeclType(BlockDescriptorType); 7405 } 7406 7407 QualType ASTContext::getBlockDescriptorExtendedType() const { 7408 if (BlockDescriptorExtendedType) 7409 return getTagDeclType(BlockDescriptorExtendedType); 7410 7411 RecordDecl *RD; 7412 // FIXME: Needs the FlagAppleBlock bit. 
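// The record built below corresponds to (illustrative sketch):
//
//   struct __block_descriptor_withcopydispose {
//     unsigned long reserved;
//     unsigned long Size;
//     void **CopyFuncPtr;
//     void **DestroyFuncPtr;
//   };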
7413 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7414 RD->startDefinition(); 7415 7416 QualType FieldTypes[] = { 7417 UnsignedLongTy, 7418 UnsignedLongTy, 7419 getPointerType(VoidPtrTy), 7420 getPointerType(VoidPtrTy) 7421 }; 7422 7423 static const char *const FieldNames[] = { 7424 "reserved", 7425 "Size", 7426 "CopyFuncPtr", 7427 "DestroyFuncPtr" 7428 }; 7429 7430 for (size_t i = 0; i < 4; ++i) { 7431 FieldDecl *Field = FieldDecl::Create( 7432 *this, RD, SourceLocation(), SourceLocation(), 7433 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7434 /*BitWidth=*/nullptr, 7435 /*Mutable=*/false, ICIS_NoInit); 7436 Field->setAccess(AS_public); 7437 RD->addDecl(Field); 7438 } 7439 7440 RD->completeDefinition(); 7441 7442 BlockDescriptorExtendedType = RD; 7443 return getTagDeclType(BlockDescriptorExtendedType); 7444 } 7445 7446 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7447 const auto *BT = dyn_cast<BuiltinType>(T); 7448 7449 if (!BT) { 7450 if (isa<PipeType>(T)) 7451 return OCLTK_Pipe; 7452 7453 return OCLTK_Default; 7454 } 7455 7456 switch (BT->getKind()) { 7457 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7458 case BuiltinType::Id: \ 7459 return OCLTK_Image; 7460 #include "clang/Basic/OpenCLImageTypes.def" 7461 7462 case BuiltinType::OCLClkEvent: 7463 return OCLTK_ClkEvent; 7464 7465 case BuiltinType::OCLEvent: 7466 return OCLTK_Event; 7467 7468 case BuiltinType::OCLQueue: 7469 return OCLTK_Queue; 7470 7471 case BuiltinType::OCLReserveID: 7472 return OCLTK_ReserveID; 7473 7474 case BuiltinType::OCLSampler: 7475 return OCLTK_Sampler; 7476 7477 default: 7478 return OCLTK_Default; 7479 } 7480 } 7481 7482 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7483 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7484 } 7485 7486 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7487 /// requires copy/dispose. Note that this must match the logic 7488 /// in buildByrefHelpers. 7489 bool ASTContext::BlockRequiresCopying(QualType Ty, 7490 const VarDecl *D) { 7491 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7492 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7493 if (!copyExpr && record->hasTrivialDestructor()) return false; 7494 7495 return true; 7496 } 7497 7498 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7499 // move or destroy. 7500 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7501 return true; 7502 7503 if (!Ty->isObjCRetainableType()) return false; 7504 7505 Qualifiers qs = Ty.getQualifiers(); 7506 7507 // If we have lifetime, that dominates. 7508 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7509 switch (lifetime) { 7510 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7511 7512 // These are just bits as far as the runtime is concerned. 7513 case Qualifiers::OCL_ExplicitNone: 7514 case Qualifiers::OCL_Autoreleasing: 7515 return false; 7516 7517 // These cases should have been taken care of when checking the type's 7518 // non-triviality. 
7519 case Qualifiers::OCL_Weak: 7520 case Qualifiers::OCL_Strong: 7521 llvm_unreachable("impossible"); 7522 } 7523 llvm_unreachable("fell out of lifetime switch!"); 7524 } 7525 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7526 Ty->isObjCObjectPointerType()); 7527 } 7528 7529 bool ASTContext::getByrefLifetime(QualType Ty, 7530 Qualifiers::ObjCLifetime &LifeTime, 7531 bool &HasByrefExtendedLayout) const { 7532 if (!getLangOpts().ObjC || 7533 getLangOpts().getGC() != LangOptions::NonGC) 7534 return false; 7535 7536 HasByrefExtendedLayout = false; 7537 if (Ty->isRecordType()) { 7538 HasByrefExtendedLayout = true; 7539 LifeTime = Qualifiers::OCL_None; 7540 } else if ((LifeTime = Ty.getObjCLifetime())) { 7541 // Honor the ARC qualifiers. 7542 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7543 // The MRR rule. 7544 LifeTime = Qualifiers::OCL_ExplicitNone; 7545 } else { 7546 LifeTime = Qualifiers::OCL_None; 7547 } 7548 return true; 7549 } 7550 7551 CanQualType ASTContext::getNSUIntegerType() const { 7552 assert(Target && "Expected target to be initialized"); 7553 const llvm::Triple &T = Target->getTriple(); 7554 // Windows is LLP64 rather than LP64 7555 if (T.isOSWindows() && T.isArch64Bit()) 7556 return UnsignedLongLongTy; 7557 return UnsignedLongTy; 7558 } 7559 7560 CanQualType ASTContext::getNSIntegerType() const { 7561 assert(Target && "Expected target to be initialized"); 7562 const llvm::Triple &T = Target->getTriple(); 7563 // Windows is LLP64 rather than LP64 7564 if (T.isOSWindows() && T.isArch64Bit()) 7565 return LongLongTy; 7566 return LongTy; 7567 } 7568 7569 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7570 if (!ObjCInstanceTypeDecl) 7571 ObjCInstanceTypeDecl = 7572 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7573 return ObjCInstanceTypeDecl; 7574 } 7575 7576 // This returns true if a type has been typedefed to BOOL: 7577 // typedef <type> BOOL; 7578 static bool isTypeTypedefedAsBOOL(QualType T) { 7579 if (const auto *TT = dyn_cast<TypedefType>(T)) 7580 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7581 return II->isStr("BOOL"); 7582 7583 return false; 7584 } 7585 7586 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7587 /// purpose. 7588 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7589 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7590 return CharUnits::Zero(); 7591 7592 CharUnits sz = getTypeSizeInChars(type); 7593 7594 // Make all integer and enum types at least as large as an int 7595 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7596 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7597 // Treat arrays as pointers, since that's how they're passed in. 7598 else if (type->isArrayType()) 7599 sz = getTypeSizeInChars(VoidPtrTy); 7600 return sz; 7601 } 7602 7603 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7604 return getTargetInfo().getCXXABI().isMicrosoft() && 7605 VD->isStaticDataMember() && 7606 VD->getType()->isIntegralOrEnumerationType() && 7607 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7608 } 7609 7610 ASTContext::InlineVariableDefinitionKind 7611 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7612 if (!VD->isInline()) 7613 return InlineVariableDefinitionKind::None; 7614 7615 // In almost all cases, it's a weak definition. 
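// For example (illustrative), 'inline int x = 1;' at namespace scope is a
// weak definition, whereas
//   struct A { static constexpr int N = 3; };
//   constexpr int A::N;   // file-scope redeclaration
// is classified below as a strong (non-discardable) definition.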
7616 auto *First = VD->getFirstDecl(); 7617 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7618 return InlineVariableDefinitionKind::Weak; 7619 7620 // If there's a file-context declaration in this translation unit, it's a 7621 // non-discardable definition. 7622 for (auto *D : VD->redecls()) 7623 if (D->getLexicalDeclContext()->isFileContext() && 7624 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7625 return InlineVariableDefinitionKind::Strong; 7626 7627 // If we've not seen one yet, we don't know. 7628 return InlineVariableDefinitionKind::WeakUnknown; 7629 } 7630 7631 static std::string charUnitsToString(const CharUnits &CU) { 7632 return llvm::itostr(CU.getQuantity()); 7633 } 7634 7635 /// getObjCEncodingForBlock - Return the encoded type for this block 7636 /// declaration. 7637 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7638 std::string S; 7639 7640 const BlockDecl *Decl = Expr->getBlockDecl(); 7641 QualType BlockTy = 7642 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7643 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7644 // Encode result type. 7645 if (getLangOpts().EncodeExtendedBlockSig) 7646 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7647 true /*Extended*/); 7648 else 7649 getObjCEncodingForType(BlockReturnTy, S); 7650 // Compute size of all parameters. 7651 // Start with computing size of a pointer in number of bytes. 7652 // FIXME: There might(should) be a better way of doing this computation! 7653 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7654 CharUnits ParmOffset = PtrSize; 7655 for (auto *PI : Decl->parameters()) { 7656 QualType PType = PI->getType(); 7657 CharUnits sz = getObjCEncodingTypeSize(PType); 7658 if (sz.isZero()) 7659 continue; 7660 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7661 ParmOffset += sz; 7662 } 7663 // Size of the argument frame 7664 S += charUnitsToString(ParmOffset); 7665 // Block pointer and offset. 7666 S += "@?0"; 7667 7668 // Argument types. 7669 ParmOffset = PtrSize; 7670 for (auto *PVDecl : Decl->parameters()) { 7671 QualType PType = PVDecl->getOriginalType(); 7672 if (const auto *AT = 7673 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7674 // Use array's original type only if it has known number of 7675 // elements. 7676 if (!isa<ConstantArrayType>(AT)) 7677 PType = PVDecl->getType(); 7678 } else if (PType->isFunctionType()) 7679 PType = PVDecl->getType(); 7680 if (getLangOpts().EncodeExtendedBlockSig) 7681 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7682 S, true /*Extended*/); 7683 else 7684 getObjCEncodingForType(PType, S); 7685 S += charUnitsToString(ParmOffset); 7686 ParmOffset += getObjCEncodingTypeSize(PType); 7687 } 7688 7689 return S; 7690 } 7691 7692 std::string 7693 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7694 std::string S; 7695 // Encode result type. 7696 getObjCEncodingForType(Decl->getReturnType(), S); 7697 CharUnits ParmOffset; 7698 // Compute size of all parameters. 7699 for (auto *PI : Decl->parameters()) { 7700 QualType PType = PI->getType(); 7701 CharUnits sz = getObjCEncodingTypeSize(PType); 7702 if (sz.isZero()) 7703 continue; 7704 7705 assert(sz.isPositive() && 7706 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7707 ParmOffset += sz; 7708 } 7709 S += charUnitsToString(ParmOffset); 7710 ParmOffset = CharUnits::Zero(); 7711 7712 // Argument types. 
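  // Worked example (illustrative only, assuming a typical 64-bit target where
  // sizeof(int) == 4 and sizeof(void *) == 8): for
  //   void f(int i, char *s);
  // the code above has already emitted "v" and the argument frame size "12";
  // the loop below then appends "i0" and "*4", giving the final encoding
  // "v12i0*4".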
7713 for (auto *PVDecl : Decl->parameters()) { 7714 QualType PType = PVDecl->getOriginalType(); 7715 if (const auto *AT = 7716 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7717 // Use array's original type only if it has known number of 7718 // elements. 7719 if (!isa<ConstantArrayType>(AT)) 7720 PType = PVDecl->getType(); 7721 } else if (PType->isFunctionType()) 7722 PType = PVDecl->getType(); 7723 getObjCEncodingForType(PType, S); 7724 S += charUnitsToString(ParmOffset); 7725 ParmOffset += getObjCEncodingTypeSize(PType); 7726 } 7727 7728 return S; 7729 } 7730 7731 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7732 /// method parameter or return type. If Extended, include class names and 7733 /// block object types. 7734 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7735 QualType T, std::string& S, 7736 bool Extended) const { 7737 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7738 getObjCEncodingForTypeQualifier(QT, S); 7739 // Encode parameter type. 7740 ObjCEncOptions Options = ObjCEncOptions() 7741 .setExpandPointedToStructures() 7742 .setExpandStructures() 7743 .setIsOutermostType(); 7744 if (Extended) 7745 Options.setEncodeBlockParameters().setEncodeClassNames(); 7746 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7747 } 7748 7749 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7750 /// declaration. 7751 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7752 bool Extended) const { 7753 // FIXME: This is not very efficient. 7754 // Encode return type. 7755 std::string S; 7756 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7757 Decl->getReturnType(), S, Extended); 7758 // Compute size of all parameters. 7759 // Start with computing size of a pointer in number of bytes. 7760 // FIXME: There might(should) be a better way of doing this computation! 7761 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7762 // The first two arguments (self and _cmd) are pointers; account for 7763 // their size. 7764 CharUnits ParmOffset = 2 * PtrSize; 7765 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7766 E = Decl->sel_param_end(); PI != E; ++PI) { 7767 QualType PType = (*PI)->getType(); 7768 CharUnits sz = getObjCEncodingTypeSize(PType); 7769 if (sz.isZero()) 7770 continue; 7771 7772 assert(sz.isPositive() && 7773 "getObjCEncodingForMethodDecl - Incomplete param type"); 7774 ParmOffset += sz; 7775 } 7776 S += charUnitsToString(ParmOffset); 7777 S += "@0:"; 7778 S += charUnitsToString(PtrSize); 7779 7780 // Argument types. 7781 ParmOffset = 2 * PtrSize; 7782 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7783 E = Decl->sel_param_end(); PI != E; ++PI) { 7784 const ParmVarDecl *PVDecl = *PI; 7785 QualType PType = PVDecl->getOriginalType(); 7786 if (const auto *AT = 7787 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7788 // Use array's original type only if it has known number of 7789 // elements. 
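      // (Example, illustrative: a parameter written as 'int buf[4]' keeps its
      // constant-array form, so its type is encoded as "[4i]", whereas
      // 'int buf[]' has no known element count, so the decayed pointer type is
      // used instead and it is encoded as "^i".)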
7790 if (!isa<ConstantArrayType>(AT)) 7791 PType = PVDecl->getType(); 7792 } else if (PType->isFunctionType()) 7793 PType = PVDecl->getType(); 7794 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7795 PType, S, Extended); 7796 S += charUnitsToString(ParmOffset); 7797 ParmOffset += getObjCEncodingTypeSize(PType); 7798 } 7799 7800 return S; 7801 } 7802 7803 ObjCPropertyImplDecl * 7804 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7805 const ObjCPropertyDecl *PD, 7806 const Decl *Container) const { 7807 if (!Container) 7808 return nullptr; 7809 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7810 for (auto *PID : CID->property_impls()) 7811 if (PID->getPropertyDecl() == PD) 7812 return PID; 7813 } else { 7814 const auto *OID = cast<ObjCImplementationDecl>(Container); 7815 for (auto *PID : OID->property_impls()) 7816 if (PID->getPropertyDecl() == PD) 7817 return PID; 7818 } 7819 return nullptr; 7820 } 7821 7822 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7823 /// property declaration. If non-NULL, Container must be either an 7824 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7825 /// NULL when getting encodings for protocol properties. 7826 /// Property attributes are stored as a comma-delimited C string. The simple 7827 /// attributes readonly and bycopy are encoded as single characters. The 7828 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7829 /// encoded as single characters, followed by an identifier. Property types 7830 /// are also encoded as a parametrized attribute. The characters used to encode 7831 /// these attributes are defined by the following enumeration: 7832 /// @code 7833 /// enum PropertyAttributes { 7834 /// kPropertyReadOnly = 'R', // property is read-only. 7835 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7836 /// kPropertyByref = '&', // property is a reference to the value last assigned 7837 /// kPropertyDynamic = 'D', // property is dynamic 7838 /// kPropertyGetter = 'G', // followed by getter selector name 7839 /// kPropertySetter = 'S', // followed by setter selector name 7840 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7841 /// kPropertyType = 'T' // followed by old-style type encoding. 7842 /// kPropertyWeak = 'W' // 'weak' property 7843 /// kPropertyStrong = 'P' // property GC'able 7844 /// kPropertyNonAtomic = 'N' // property non-atomic 7845 /// kPropertyOptional = '?' // property optional 7846 /// }; 7847 /// @endcode 7848 std::string 7849 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7850 const Decl *Container) const { 7851 // Collect information from the property implementation decl(s). 7852 bool Dynamic = false; 7853 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7854 7855 if (ObjCPropertyImplDecl *PropertyImpDecl = 7856 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7857 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7858 Dynamic = true; 7859 else 7860 SynthesizePID = PropertyImpDecl; 7861 } 7862 7863 // FIXME: This is not very efficient. 7864 std::string S = "T"; 7865 7866 // Encode result type. 7867 // GCC has some special rules regarding encoding of properties which 7868 // closely resembles encoding of ivars. 
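  // Illustrative result (an assumed example, not part of the original
  // sources): a declaration such as
  //   @property (nonatomic, copy) NSString *name;
  // synthesized onto an ivar named _name typically produces the string
  //   T@"NSString",C,N,V_name
  // assembled piece by piece by the code that follows.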
7869 getObjCEncodingForPropertyType(PD->getType(), S); 7870 7871 if (PD->isOptional()) 7872 S += ",?"; 7873 7874 if (PD->isReadOnly()) { 7875 S += ",R"; 7876 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7877 S += ",C"; 7878 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7879 S += ",&"; 7880 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7881 S += ",W"; 7882 } else { 7883 switch (PD->getSetterKind()) { 7884 case ObjCPropertyDecl::Assign: break; 7885 case ObjCPropertyDecl::Copy: S += ",C"; break; 7886 case ObjCPropertyDecl::Retain: S += ",&"; break; 7887 case ObjCPropertyDecl::Weak: S += ",W"; break; 7888 } 7889 } 7890 7891 // It really isn't clear at all what this means, since properties 7892 // are "dynamic by default". 7893 if (Dynamic) 7894 S += ",D"; 7895 7896 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7897 S += ",N"; 7898 7899 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7900 S += ",G"; 7901 S += PD->getGetterName().getAsString(); 7902 } 7903 7904 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7905 S += ",S"; 7906 S += PD->getSetterName().getAsString(); 7907 } 7908 7909 if (SynthesizePID) { 7910 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7911 S += ",V"; 7912 S += OID->getNameAsString(); 7913 } 7914 7915 // FIXME: OBJCGC: weak & strong 7916 return S; 7917 } 7918 7919 /// getLegacyIntegralTypeEncoding - 7920 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7921 /// 'l' or 'L' , but not always. For typedefs, we need to use 7922 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7923 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7924 if (PointeeTy->getAs<TypedefType>()) { 7925 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7926 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7927 PointeeTy = UnsignedIntTy; 7928 else 7929 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7930 PointeeTy = IntTy; 7931 } 7932 } 7933 } 7934 7935 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7936 const FieldDecl *Field, 7937 QualType *NotEncodedT) const { 7938 // We follow the behavior of gcc, expanding structures which are 7939 // directly pointed to, and expanding embedded structures. Note that 7940 // these rules are sufficient to prevent recursive encoding of the 7941 // same type. 7942 getObjCEncodingForTypeImpl(T, S, 7943 ObjCEncOptions() 7944 .setExpandPointedToStructures() 7945 .setExpandStructures() 7946 .setIsOutermostType(), 7947 Field, NotEncodedT); 7948 } 7949 7950 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7951 std::string& S) const { 7952 // Encode result type. 7953 // GCC has some special rules regarding encoding of properties which 7954 // closely resembles encoding of ivars. 
7955 getObjCEncodingForTypeImpl(T, S, 7956 ObjCEncOptions() 7957 .setExpandPointedToStructures() 7958 .setExpandStructures() 7959 .setIsOutermostType() 7960 .setEncodingProperty(), 7961 /*Field=*/nullptr); 7962 } 7963 7964 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7965 const BuiltinType *BT) { 7966 BuiltinType::Kind kind = BT->getKind(); 7967 switch (kind) { 7968 case BuiltinType::Void: return 'v'; 7969 case BuiltinType::Bool: return 'B'; 7970 case BuiltinType::Char8: 7971 case BuiltinType::Char_U: 7972 case BuiltinType::UChar: return 'C'; 7973 case BuiltinType::Char16: 7974 case BuiltinType::UShort: return 'S'; 7975 case BuiltinType::Char32: 7976 case BuiltinType::UInt: return 'I'; 7977 case BuiltinType::ULong: 7978 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7979 case BuiltinType::UInt128: return 'T'; 7980 case BuiltinType::ULongLong: return 'Q'; 7981 case BuiltinType::Char_S: 7982 case BuiltinType::SChar: return 'c'; 7983 case BuiltinType::Short: return 's'; 7984 case BuiltinType::WChar_S: 7985 case BuiltinType::WChar_U: 7986 case BuiltinType::Int: return 'i'; 7987 case BuiltinType::Long: 7988 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7989 case BuiltinType::LongLong: return 'q'; 7990 case BuiltinType::Int128: return 't'; 7991 case BuiltinType::Float: return 'f'; 7992 case BuiltinType::Double: return 'd'; 7993 case BuiltinType::LongDouble: return 'D'; 7994 case BuiltinType::NullPtr: return '*'; // like char* 7995 7996 case BuiltinType::BFloat16: 7997 case BuiltinType::Float16: 7998 case BuiltinType::Float128: 7999 case BuiltinType::Ibm128: 8000 case BuiltinType::Half: 8001 case BuiltinType::ShortAccum: 8002 case BuiltinType::Accum: 8003 case BuiltinType::LongAccum: 8004 case BuiltinType::UShortAccum: 8005 case BuiltinType::UAccum: 8006 case BuiltinType::ULongAccum: 8007 case BuiltinType::ShortFract: 8008 case BuiltinType::Fract: 8009 case BuiltinType::LongFract: 8010 case BuiltinType::UShortFract: 8011 case BuiltinType::UFract: 8012 case BuiltinType::ULongFract: 8013 case BuiltinType::SatShortAccum: 8014 case BuiltinType::SatAccum: 8015 case BuiltinType::SatLongAccum: 8016 case BuiltinType::SatUShortAccum: 8017 case BuiltinType::SatUAccum: 8018 case BuiltinType::SatULongAccum: 8019 case BuiltinType::SatShortFract: 8020 case BuiltinType::SatFract: 8021 case BuiltinType::SatLongFract: 8022 case BuiltinType::SatUShortFract: 8023 case BuiltinType::SatUFract: 8024 case BuiltinType::SatULongFract: 8025 // FIXME: potentially need @encodes for these! 8026 return ' '; 8027 8028 #define SVE_TYPE(Name, Id, SingletonId) \ 8029 case BuiltinType::Id: 8030 #include "clang/Basic/AArch64SVEACLETypes.def" 8031 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8032 #include "clang/Basic/RISCVVTypes.def" 8033 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8034 #include "clang/Basic/WebAssemblyReferenceTypes.def" 8035 { 8036 DiagnosticsEngine &Diags = C->getDiagnostics(); 8037 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 8038 "cannot yet @encode type %0"); 8039 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 8040 return ' '; 8041 } 8042 8043 case BuiltinType::ObjCId: 8044 case BuiltinType::ObjCClass: 8045 case BuiltinType::ObjCSel: 8046 llvm_unreachable("@encoding ObjC primitive type"); 8047 8048 // OpenCL and placeholder types don't need @encodings. 
8049 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 8050 case BuiltinType::Id: 8051 #include "clang/Basic/OpenCLImageTypes.def" 8052 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 8053 case BuiltinType::Id: 8054 #include "clang/Basic/OpenCLExtensionTypes.def" 8055 case BuiltinType::OCLEvent: 8056 case BuiltinType::OCLClkEvent: 8057 case BuiltinType::OCLQueue: 8058 case BuiltinType::OCLReserveID: 8059 case BuiltinType::OCLSampler: 8060 case BuiltinType::Dependent: 8061 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 8062 case BuiltinType::Id: 8063 #include "clang/Basic/PPCTypes.def" 8064 #define BUILTIN_TYPE(KIND, ID) 8065 #define PLACEHOLDER_TYPE(KIND, ID) \ 8066 case BuiltinType::KIND: 8067 #include "clang/AST/BuiltinTypes.def" 8068 llvm_unreachable("invalid builtin type for @encode"); 8069 } 8070 llvm_unreachable("invalid BuiltinType::Kind value"); 8071 } 8072 8073 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 8074 EnumDecl *Enum = ET->getDecl(); 8075 8076 // The encoding of an non-fixed enum type is always 'i', regardless of size. 8077 if (!Enum->isFixed()) 8078 return 'i'; 8079 8080 // The encoding of a fixed enum type matches its fixed underlying type. 8081 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 8082 return getObjCEncodingForPrimitiveType(C, BT); 8083 } 8084 8085 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 8086 QualType T, const FieldDecl *FD) { 8087 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 8088 S += 'b'; 8089 // The NeXT runtime encodes bit fields as b followed by the number of bits. 8090 // The GNU runtime requires more information; bitfields are encoded as b, 8091 // then the offset (in bits) of the first element, then the type of the 8092 // bitfield, then the size in bits. For example, in this structure: 8093 // 8094 // struct 8095 // { 8096 // int integer; 8097 // int flags:2; 8098 // }; 8099 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 8100 // runtime, but b32i2 for the GNU runtime. The reason for this extra 8101 // information is not especially sensible, but we're stuck with it for 8102 // compatibility with GCC, although providing it breaks anything that 8103 // actually uses runtime introspection and wants to work on both runtimes... 8104 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 8105 uint64_t Offset; 8106 8107 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8108 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8109 IVD); 8110 } else { 8111 const RecordDecl *RD = FD->getParent(); 8112 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8113 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8114 } 8115 8116 S += llvm::utostr(Offset); 8117 8118 if (const auto *ET = T->getAs<EnumType>()) 8119 S += ObjCEncodingForEnumType(Ctx, ET); 8120 else { 8121 const auto *BT = T->castAs<BuiltinType>(); 8122 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8123 } 8124 } 8125 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8126 } 8127 8128 // Helper function for determining whether the encoded type string would include 8129 // a template specialization type. 
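// For instance (illustrative): a record containing a std::vector<int> member
// would have a template specialization in its encoded string, which lets the
// pointer case in getObjCEncodingForTypeImpl fall back to an opaque "^v"
// encoding instead of expanding the class, unless encoding of C++ class
// template specializations has been enabled.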
8130 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8131 bool VisitBasesAndFields) { 8132 T = T->getBaseElementTypeUnsafe(); 8133 8134 if (auto *PT = T->getAs<PointerType>()) 8135 return hasTemplateSpecializationInEncodedString( 8136 PT->getPointeeType().getTypePtr(), false); 8137 8138 auto *CXXRD = T->getAsCXXRecordDecl(); 8139 8140 if (!CXXRD) 8141 return false; 8142 8143 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8144 return true; 8145 8146 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8147 return false; 8148 8149 for (const auto &B : CXXRD->bases()) 8150 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8151 true)) 8152 return true; 8153 8154 for (auto *FD : CXXRD->fields()) 8155 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8156 true)) 8157 return true; 8158 8159 return false; 8160 } 8161 8162 // FIXME: Use SmallString for accumulating string. 8163 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8164 const ObjCEncOptions Options, 8165 const FieldDecl *FD, 8166 QualType *NotEncodedT) const { 8167 CanQualType CT = getCanonicalType(T); 8168 switch (CT->getTypeClass()) { 8169 case Type::Builtin: 8170 case Type::Enum: 8171 if (FD && FD->isBitField()) 8172 return EncodeBitField(this, S, T, FD); 8173 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8174 S += getObjCEncodingForPrimitiveType(this, BT); 8175 else 8176 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8177 return; 8178 8179 case Type::Complex: 8180 S += 'j'; 8181 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8182 ObjCEncOptions(), 8183 /*Field=*/nullptr); 8184 return; 8185 8186 case Type::Atomic: 8187 S += 'A'; 8188 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8189 ObjCEncOptions(), 8190 /*Field=*/nullptr); 8191 return; 8192 8193 // encoding for pointer or reference types. 8194 case Type::Pointer: 8195 case Type::LValueReference: 8196 case Type::RValueReference: { 8197 QualType PointeeTy; 8198 if (isa<PointerType>(CT)) { 8199 const auto *PT = T->castAs<PointerType>(); 8200 if (PT->isObjCSelType()) { 8201 S += ':'; 8202 return; 8203 } 8204 PointeeTy = PT->getPointeeType(); 8205 } else { 8206 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8207 } 8208 8209 bool isReadOnly = false; 8210 // For historical/compatibility reasons, the read-only qualifier of the 8211 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8212 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8213 // Also, do not emit the 'r' for anything but the outermost type! 8214 if (T->getAs<TypedefType>()) { 8215 if (Options.IsOutermostType() && T.isConstQualified()) { 8216 isReadOnly = true; 8217 S += 'r'; 8218 } 8219 } else if (Options.IsOutermostType()) { 8220 QualType P = PointeeTy; 8221 while (auto PT = P->getAs<PointerType>()) 8222 P = PT->getPointeeType(); 8223 if (P.isConstQualified()) { 8224 isReadOnly = true; 8225 S += 'r'; 8226 } 8227 } 8228 if (isReadOnly) { 8229 // Another legacy compatibility encoding. Some ObjC qualifier and type 8230 // combinations need to be rearranged. 8231 // Rewrite "in const" from "nr" to "rn" 8232 if (StringRef(S).ends_with("nr")) 8233 S.replace(S.end()-2, S.end(), "rn"); 8234 } 8235 8236 if (PointeeTy->isCharType()) { 8237 // char pointer types should be encoded as '*' unless it is a 8238 // type that has been typedef'd to 'BOOL'. 
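    // (Example, illustrative: @encode(char *) yields "*", but a BOOL *
    // parameter, where BOOL is typedef'd to signed char as on traditional
    // platforms, falls through below and is encoded as "^c".)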
8239 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8240 S += '*'; 8241 return; 8242 } 8243 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8244 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8245 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8246 S += '#'; 8247 return; 8248 } 8249 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8250 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8251 S += '@'; 8252 return; 8253 } 8254 // If the encoded string for the class includes template names, just emit 8255 // "^v" for pointers to the class. 8256 if (getLangOpts().CPlusPlus && 8257 (!getLangOpts().EncodeCXXClassTemplateSpec && 8258 hasTemplateSpecializationInEncodedString( 8259 RTy, Options.ExpandPointedToStructures()))) { 8260 S += "^v"; 8261 return; 8262 } 8263 // fall through... 8264 } 8265 S += '^'; 8266 getLegacyIntegralTypeEncoding(PointeeTy); 8267 8268 ObjCEncOptions NewOptions; 8269 if (Options.ExpandPointedToStructures()) 8270 NewOptions.setExpandStructures(); 8271 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8272 /*Field=*/nullptr, NotEncodedT); 8273 return; 8274 } 8275 8276 case Type::ConstantArray: 8277 case Type::IncompleteArray: 8278 case Type::VariableArray: { 8279 const auto *AT = cast<ArrayType>(CT); 8280 8281 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8282 // Incomplete arrays are encoded as a pointer to the array element. 8283 S += '^'; 8284 8285 getObjCEncodingForTypeImpl( 8286 AT->getElementType(), S, 8287 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8288 } else { 8289 S += '['; 8290 8291 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8292 S += llvm::utostr(CAT->getSize().getZExtValue()); 8293 else { 8294 //Variable length arrays are encoded as a regular array with 0 elements. 8295 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8296 "Unknown array type!"); 8297 S += '0'; 8298 } 8299 8300 getObjCEncodingForTypeImpl( 8301 AT->getElementType(), S, 8302 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8303 NotEncodedT); 8304 S += ']'; 8305 } 8306 return; 8307 } 8308 8309 case Type::FunctionNoProto: 8310 case Type::FunctionProto: 8311 S += '?'; 8312 return; 8313 8314 case Type::Record: { 8315 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8316 S += RDecl->isUnion() ? '(' : '{'; 8317 // Anonymous structures print as '?' 8318 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8319 S += II->getName(); 8320 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8321 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8322 llvm::raw_string_ostream OS(S); 8323 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8324 getPrintingPolicy()); 8325 } 8326 } else { 8327 S += '?'; 8328 } 8329 if (Options.ExpandStructures()) { 8330 S += '='; 8331 if (!RDecl->isUnion()) { 8332 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8333 } else { 8334 for (const auto *Field : RDecl->fields()) { 8335 if (FD) { 8336 S += '"'; 8337 S += Field->getNameAsString(); 8338 S += '"'; 8339 } 8340 8341 // Special case bit-fields. 
8342 if (Field->isBitField()) { 8343 getObjCEncodingForTypeImpl(Field->getType(), S, 8344 ObjCEncOptions().setExpandStructures(), 8345 Field); 8346 } else { 8347 QualType qt = Field->getType(); 8348 getLegacyIntegralTypeEncoding(qt); 8349 getObjCEncodingForTypeImpl( 8350 qt, S, 8351 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8352 NotEncodedT); 8353 } 8354 } 8355 } 8356 } 8357 S += RDecl->isUnion() ? ')' : '}'; 8358 return; 8359 } 8360 8361 case Type::BlockPointer: { 8362 const auto *BT = T->castAs<BlockPointerType>(); 8363 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8364 if (Options.EncodeBlockParameters()) { 8365 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8366 8367 S += '<'; 8368 // Block return type 8369 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8370 Options.forComponentType(), FD, NotEncodedT); 8371 // Block self 8372 S += "@?"; 8373 // Block parameters 8374 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8375 for (const auto &I : FPT->param_types()) 8376 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8377 NotEncodedT); 8378 } 8379 S += '>'; 8380 } 8381 return; 8382 } 8383 8384 case Type::ObjCObject: { 8385 // hack to match legacy encoding of *id and *Class 8386 QualType Ty = getObjCObjectPointerType(CT); 8387 if (Ty->isObjCIdType()) { 8388 S += "{objc_object=}"; 8389 return; 8390 } 8391 else if (Ty->isObjCClassType()) { 8392 S += "{objc_class=}"; 8393 return; 8394 } 8395 // TODO: Double check to make sure this intentionally falls through. 8396 [[fallthrough]]; 8397 } 8398 8399 case Type::ObjCInterface: { 8400 // Ignore protocol qualifiers when mangling at this level. 8401 // @encode(class_name) 8402 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8403 S += '{'; 8404 S += OI->getObjCRuntimeNameAsString(); 8405 if (Options.ExpandStructures()) { 8406 S += '='; 8407 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8408 DeepCollectObjCIvars(OI, true, Ivars); 8409 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8410 const FieldDecl *Field = Ivars[i]; 8411 if (Field->isBitField()) 8412 getObjCEncodingForTypeImpl(Field->getType(), S, 8413 ObjCEncOptions().setExpandStructures(), 8414 Field); 8415 else 8416 getObjCEncodingForTypeImpl(Field->getType(), S, 8417 ObjCEncOptions().setExpandStructures(), FD, 8418 NotEncodedT); 8419 } 8420 } 8421 S += '}'; 8422 return; 8423 } 8424 8425 case Type::ObjCObjectPointer: { 8426 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8427 if (OPT->isObjCIdType()) { 8428 S += '@'; 8429 return; 8430 } 8431 8432 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8433 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8434 // Since this is a binary compatibility issue, need to consult with 8435 // runtime folks. Fortunately, this is a *very* obscure construct. 8436 S += '#'; 8437 return; 8438 } 8439 8440 if (OPT->isObjCQualifiedIdType()) { 8441 getObjCEncodingForTypeImpl( 8442 getObjCIdType(), S, 8443 Options.keepingOnly(ObjCEncOptions() 8444 .setExpandPointedToStructures() 8445 .setExpandStructures()), 8446 FD); 8447 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8448 // Note that we do extended encoding of protocol qualifier list 8449 // Only when doing ivar or property encoding. 
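        // (Illustrative example: an ivar declared as id<NSCopying> obj; comes
        // out as @"<NSCopying>" here, whereas in other contexts it would
        // simply be "@".)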
8450 S += '"'; 8451 for (const auto *I : OPT->quals()) { 8452 S += '<'; 8453 S += I->getObjCRuntimeNameAsString(); 8454 S += '>'; 8455 } 8456 S += '"'; 8457 } 8458 return; 8459 } 8460 8461 S += '@'; 8462 if (OPT->getInterfaceDecl() && 8463 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8464 S += '"'; 8465 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8466 for (const auto *I : OPT->quals()) { 8467 S += '<'; 8468 S += I->getObjCRuntimeNameAsString(); 8469 S += '>'; 8470 } 8471 S += '"'; 8472 } 8473 return; 8474 } 8475 8476 // gcc just blithely ignores member pointers. 8477 // FIXME: we should do better than that. 'M' is available. 8478 case Type::MemberPointer: 8479 // This matches gcc's encoding, even though technically it is insufficient. 8480 //FIXME. We should do a better job than gcc. 8481 case Type::Vector: 8482 case Type::ExtVector: 8483 // Until we have a coherent encoding of these three types, issue warning. 8484 if (NotEncodedT) 8485 *NotEncodedT = T; 8486 return; 8487 8488 case Type::ConstantMatrix: 8489 if (NotEncodedT) 8490 *NotEncodedT = T; 8491 return; 8492 8493 case Type::BitInt: 8494 if (NotEncodedT) 8495 *NotEncodedT = T; 8496 return; 8497 8498 // We could see an undeduced auto type here during error recovery. 8499 // Just ignore it. 8500 case Type::Auto: 8501 case Type::DeducedTemplateSpecialization: 8502 return; 8503 8504 case Type::Pipe: 8505 #define ABSTRACT_TYPE(KIND, BASE) 8506 #define TYPE(KIND, BASE) 8507 #define DEPENDENT_TYPE(KIND, BASE) \ 8508 case Type::KIND: 8509 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8510 case Type::KIND: 8511 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8512 case Type::KIND: 8513 #include "clang/AST/TypeNodes.inc" 8514 llvm_unreachable("@encode for dependent type!"); 8515 } 8516 llvm_unreachable("bad type kind!"); 8517 } 8518 8519 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8520 std::string &S, 8521 const FieldDecl *FD, 8522 bool includeVBases, 8523 QualType *NotEncodedT) const { 8524 assert(RDecl && "Expected non-null RecordDecl"); 8525 assert(!RDecl->isUnion() && "Should not be called for unions"); 8526 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8527 return; 8528 8529 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8530 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8531 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8532 8533 if (CXXRec) { 8534 for (const auto &BI : CXXRec->bases()) { 8535 if (!BI.isVirtual()) { 8536 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8537 if (base->isEmpty()) 8538 continue; 8539 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8540 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8541 std::make_pair(offs, base)); 8542 } 8543 } 8544 } 8545 8546 for (FieldDecl *Field : RDecl->fields()) { 8547 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8548 continue; 8549 uint64_t offs = layout.getFieldOffset(Field->getFieldIndex()); 8550 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8551 std::make_pair(offs, Field)); 8552 } 8553 8554 if (CXXRec && includeVBases) { 8555 for (const auto &BI : CXXRec->vbases()) { 8556 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8557 if (base->isEmpty()) 8558 continue; 8559 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8560 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8561 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8562 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 8563 std::make_pair(offs, base)); 8564 } 8565 } 8566 8567 CharUnits size; 8568 if (CXXRec) { 8569 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 8570 } else { 8571 size = layout.getSize(); 8572 } 8573 8574 #ifndef NDEBUG 8575 uint64_t CurOffs = 0; 8576 #endif 8577 std::multimap<uint64_t, NamedDecl *>::iterator 8578 CurLayObj = FieldOrBaseOffsets.begin(); 8579 8580 if (CXXRec && CXXRec->isDynamicClass() && 8581 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8582 if (FD) { 8583 S += "\"_vptr$"; 8584 std::string recname = CXXRec->getNameAsString(); 8585 if (recname.empty()) recname = "?"; 8586 S += recname; 8587 S += '"'; 8588 } 8589 S += "^^?"; 8590 #ifndef NDEBUG 8591 CurOffs += getTypeSize(VoidPtrTy); 8592 #endif 8593 } 8594 8595 if (!RDecl->hasFlexibleArrayMember()) { 8596 // Mark the end of the structure. 8597 uint64_t offs = toBits(size); 8598 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8599 std::make_pair(offs, nullptr)); 8600 } 8601 8602 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8603 #ifndef NDEBUG 8604 assert(CurOffs <= CurLayObj->first); 8605 if (CurOffs < CurLayObj->first) { 8606 uint64_t padding = CurLayObj->first - CurOffs; 8607 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8608 // packing/alignment of members is different than normal, in which case 8609 // the encoding will be out-of-sync with the real layout. 8610 // If the runtime switches to just consider the size of types without 8611 // taking into account alignment, we could make padding explicit in the 8612 // encoding (e.g. using arrays of chars). The encoding strings would be 8613 // longer then, though. 8614 CurOffs += padding; 8615 } 8616 #endif 8617 8618 NamedDecl *dcl = CurLayObj->second; 8619 if (!dcl) 8620 break; // reached end of structure. 8621 8622 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8623 // We expand the bases without their virtual bases since those are going 8624 // in the initial structure. Note that this differs from gcc, which 8625 // expands virtual bases each time one is encountered in the hierarchy, 8626 // making the encoding type bigger than it really is.
8627 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8628 NotEncodedT); 8629 assert(!base->isEmpty()); 8630 #ifndef NDEBUG 8631 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8632 #endif 8633 } else { 8634 const auto *field = cast<FieldDecl>(dcl); 8635 if (FD) { 8636 S += '"'; 8637 S += field->getNameAsString(); 8638 S += '"'; 8639 } 8640 8641 if (field->isBitField()) { 8642 EncodeBitField(this, S, field->getType(), field); 8643 #ifndef NDEBUG 8644 CurOffs += field->getBitWidthValue(*this); 8645 #endif 8646 } else { 8647 QualType qt = field->getType(); 8648 getLegacyIntegralTypeEncoding(qt); 8649 getObjCEncodingForTypeImpl( 8650 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8651 FD, NotEncodedT); 8652 #ifndef NDEBUG 8653 CurOffs += getTypeSize(field->getType()); 8654 #endif 8655 } 8656 } 8657 } 8658 } 8659 8660 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8661 std::string& S) const { 8662 if (QT & Decl::OBJC_TQ_In) 8663 S += 'n'; 8664 if (QT & Decl::OBJC_TQ_Inout) 8665 S += 'N'; 8666 if (QT & Decl::OBJC_TQ_Out) 8667 S += 'o'; 8668 if (QT & Decl::OBJC_TQ_Bycopy) 8669 S += 'O'; 8670 if (QT & Decl::OBJC_TQ_Byref) 8671 S += 'R'; 8672 if (QT & Decl::OBJC_TQ_Oneway) 8673 S += 'V'; 8674 } 8675 8676 TypedefDecl *ASTContext::getObjCIdDecl() const { 8677 if (!ObjCIdDecl) { 8678 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8679 T = getObjCObjectPointerType(T); 8680 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8681 } 8682 return ObjCIdDecl; 8683 } 8684 8685 TypedefDecl *ASTContext::getObjCSelDecl() const { 8686 if (!ObjCSelDecl) { 8687 QualType T = getPointerType(ObjCBuiltinSelTy); 8688 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8689 } 8690 return ObjCSelDecl; 8691 } 8692 8693 TypedefDecl *ASTContext::getObjCClassDecl() const { 8694 if (!ObjCClassDecl) { 8695 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8696 T = getObjCObjectPointerType(T); 8697 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8698 } 8699 return ObjCClassDecl; 8700 } 8701 8702 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8703 if (!ObjCProtocolClassDecl) { 8704 ObjCProtocolClassDecl 8705 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8706 SourceLocation(), 8707 &Idents.get("Protocol"), 8708 /*typeParamList=*/nullptr, 8709 /*PrevDecl=*/nullptr, 8710 SourceLocation(), true); 8711 } 8712 8713 return ObjCProtocolClassDecl; 8714 } 8715 8716 //===----------------------------------------------------------------------===// 8717 // __builtin_va_list Construction Functions 8718 //===----------------------------------------------------------------------===// 8719 8720 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8721 StringRef Name) { 8722 // typedef char* __builtin[_ms]_va_list; 8723 QualType T = Context->getPointerType(Context->CharTy); 8724 return Context->buildImplicitTypedef(T, Name); 8725 } 8726 8727 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8728 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8729 } 8730 8731 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8732 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8733 } 8734 8735 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8736 // typedef void* __builtin_va_list; 8737 QualType T = Context->getPointerType(Context->VoidTy); 8738 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8739 } 8740 8741 static TypedefDecl * 8742 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8743 // struct __va_list 8744 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8745 if (Context->getLangOpts().CPlusPlus) { 8746 // namespace std { struct __va_list { 8747 auto *NS = NamespaceDecl::Create( 8748 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8749 /*Inline=*/false, SourceLocation(), SourceLocation(), 8750 &Context->Idents.get("std"), 8751 /*PrevDecl=*/nullptr, /*Nested=*/false); 8752 NS->setImplicit(); 8753 VaListTagDecl->setDeclContext(NS); 8754 } 8755 8756 VaListTagDecl->startDefinition(); 8757 8758 const size_t NumFields = 5; 8759 QualType FieldTypes[NumFields]; 8760 const char *FieldNames[NumFields]; 8761 8762 // void *__stack; 8763 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8764 FieldNames[0] = "__stack"; 8765 8766 // void *__gr_top; 8767 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8768 FieldNames[1] = "__gr_top"; 8769 8770 // void *__vr_top; 8771 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8772 FieldNames[2] = "__vr_top"; 8773 8774 // int __gr_offs; 8775 FieldTypes[3] = Context->IntTy; 8776 FieldNames[3] = "__gr_offs"; 8777 8778 // int __vr_offs; 8779 FieldTypes[4] = Context->IntTy; 8780 FieldNames[4] = "__vr_offs"; 8781 8782 // Create fields 8783 for (unsigned i = 0; i < NumFields; ++i) { 8784 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8785 VaListTagDecl, 8786 SourceLocation(), 8787 SourceLocation(), 8788 &Context->Idents.get(FieldNames[i]), 8789 FieldTypes[i], /*TInfo=*/nullptr, 8790 /*BitWidth=*/nullptr, 8791 /*Mutable=*/false, 8792 ICIS_NoInit); 8793 Field->setAccess(AS_public); 8794 VaListTagDecl->addDecl(Field); 8795 } 8796 VaListTagDecl->completeDefinition(); 8797 Context->VaListTagDecl = VaListTagDecl; 8798 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8799 8800 // } __builtin_va_list; 8801 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8802 } 8803 8804 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8805 // typedef struct __va_list_tag { 8806 RecordDecl *VaListTagDecl; 8807 8808 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8809 VaListTagDecl->startDefinition(); 8810 8811 const size_t NumFields = 5; 8812 QualType FieldTypes[NumFields]; 8813 const char *FieldNames[NumFields]; 8814 8815 // unsigned char gpr; 8816 FieldTypes[0] = Context->UnsignedCharTy; 8817 FieldNames[0] = "gpr"; 8818 8819 // unsigned char fpr; 8820 FieldTypes[1] = Context->UnsignedCharTy; 8821 FieldNames[1] = "fpr"; 8822 8823 // unsigned short reserved; 8824 FieldTypes[2] = Context->UnsignedShortTy; 8825 FieldNames[2] = "reserved"; 8826 8827 // void* overflow_arg_area; 8828 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8829 FieldNames[3] = "overflow_arg_area"; 8830 8831 // void* reg_save_area; 8832 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8833 FieldNames[4] = "reg_save_area"; 8834 8835 // Create fields 8836 for (unsigned i = 0; i < NumFields; ++i) { 8837 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8838 SourceLocation(), 8839 SourceLocation(), 8840 &Context->Idents.get(FieldNames[i]), 8841 FieldTypes[i], /*TInfo=*/nullptr, 8842 /*BitWidth=*/nullptr, 8843 /*Mutable=*/false, 8844 ICIS_NoInit); 8845 Field->setAccess(AS_public); 8846 VaListTagDecl->addDecl(Field); 8847 } 8848 VaListTagDecl->completeDefinition(); 8849 
Context->VaListTagDecl = VaListTagDecl; 8850 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8851 8852 // } __va_list_tag; 8853 TypedefDecl *VaListTagTypedefDecl = 8854 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8855 8856 QualType VaListTagTypedefType = 8857 Context->getTypedefType(VaListTagTypedefDecl); 8858 8859 // typedef __va_list_tag __builtin_va_list[1]; 8860 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8861 QualType VaListTagArrayType = Context->getConstantArrayType( 8862 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 8863 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8864 } 8865 8866 static TypedefDecl * 8867 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8868 // struct __va_list_tag { 8869 RecordDecl *VaListTagDecl; 8870 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8871 VaListTagDecl->startDefinition(); 8872 8873 const size_t NumFields = 4; 8874 QualType FieldTypes[NumFields]; 8875 const char *FieldNames[NumFields]; 8876 8877 // unsigned gp_offset; 8878 FieldTypes[0] = Context->UnsignedIntTy; 8879 FieldNames[0] = "gp_offset"; 8880 8881 // unsigned fp_offset; 8882 FieldTypes[1] = Context->UnsignedIntTy; 8883 FieldNames[1] = "fp_offset"; 8884 8885 // void* overflow_arg_area; 8886 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8887 FieldNames[2] = "overflow_arg_area"; 8888 8889 // void* reg_save_area; 8890 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8891 FieldNames[3] = "reg_save_area"; 8892 8893 // Create fields 8894 for (unsigned i = 0; i < NumFields; ++i) { 8895 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8896 VaListTagDecl, 8897 SourceLocation(), 8898 SourceLocation(), 8899 &Context->Idents.get(FieldNames[i]), 8900 FieldTypes[i], /*TInfo=*/nullptr, 8901 /*BitWidth=*/nullptr, 8902 /*Mutable=*/false, 8903 ICIS_NoInit); 8904 Field->setAccess(AS_public); 8905 VaListTagDecl->addDecl(Field); 8906 } 8907 VaListTagDecl->completeDefinition(); 8908 Context->VaListTagDecl = VaListTagDecl; 8909 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8910 8911 // }; 8912 8913 // typedef struct __va_list_tag __builtin_va_list[1]; 8914 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8915 QualType VaListTagArrayType = Context->getConstantArrayType( 8916 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 8917 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8918 } 8919 8920 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8921 // typedef int __builtin_va_list[4]; 8922 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8923 QualType IntArrayType = Context->getConstantArrayType( 8924 Context->IntTy, Size, nullptr, ArraySizeModifier::Normal, 0); 8925 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8926 } 8927 8928 static TypedefDecl * 8929 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8930 // struct __va_list 8931 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8932 if (Context->getLangOpts().CPlusPlus) { 8933 // namespace std { struct __va_list { 8934 NamespaceDecl *NS; 8935 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8936 Context->getTranslationUnitDecl(), 8937 /*Inline=*/false, SourceLocation(), 8938 SourceLocation(), &Context->Idents.get("std"), 8939 /*PrevDecl=*/nullptr, /*Nested=*/false); 8940 
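    // (Informal note: in C++ mode the AAPCS va_list tag is placed in
    // namespace std, so the effective declaration is roughly
    //   namespace std { struct __va_list { void *__ap; }; }
    // with the single __ap field added just below.)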
NS->setImplicit(); 8941 VaListDecl->setDeclContext(NS); 8942 } 8943 8944 VaListDecl->startDefinition(); 8945 8946 // void * __ap; 8947 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8948 VaListDecl, 8949 SourceLocation(), 8950 SourceLocation(), 8951 &Context->Idents.get("__ap"), 8952 Context->getPointerType(Context->VoidTy), 8953 /*TInfo=*/nullptr, 8954 /*BitWidth=*/nullptr, 8955 /*Mutable=*/false, 8956 ICIS_NoInit); 8957 Field->setAccess(AS_public); 8958 VaListDecl->addDecl(Field); 8959 8960 // }; 8961 VaListDecl->completeDefinition(); 8962 Context->VaListTagDecl = VaListDecl; 8963 8964 // typedef struct __va_list __builtin_va_list; 8965 QualType T = Context->getRecordType(VaListDecl); 8966 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8967 } 8968 8969 static TypedefDecl * 8970 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8971 // struct __va_list_tag { 8972 RecordDecl *VaListTagDecl; 8973 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8974 VaListTagDecl->startDefinition(); 8975 8976 const size_t NumFields = 4; 8977 QualType FieldTypes[NumFields]; 8978 const char *FieldNames[NumFields]; 8979 8980 // long __gpr; 8981 FieldTypes[0] = Context->LongTy; 8982 FieldNames[0] = "__gpr"; 8983 8984 // long __fpr; 8985 FieldTypes[1] = Context->LongTy; 8986 FieldNames[1] = "__fpr"; 8987 8988 // void *__overflow_arg_area; 8989 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8990 FieldNames[2] = "__overflow_arg_area"; 8991 8992 // void *__reg_save_area; 8993 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8994 FieldNames[3] = "__reg_save_area"; 8995 8996 // Create fields 8997 for (unsigned i = 0; i < NumFields; ++i) { 8998 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8999 VaListTagDecl, 9000 SourceLocation(), 9001 SourceLocation(), 9002 &Context->Idents.get(FieldNames[i]), 9003 FieldTypes[i], /*TInfo=*/nullptr, 9004 /*BitWidth=*/nullptr, 9005 /*Mutable=*/false, 9006 ICIS_NoInit); 9007 Field->setAccess(AS_public); 9008 VaListTagDecl->addDecl(Field); 9009 } 9010 VaListTagDecl->completeDefinition(); 9011 Context->VaListTagDecl = VaListTagDecl; 9012 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9013 9014 // }; 9015 9016 // typedef __va_list_tag __builtin_va_list[1]; 9017 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9018 QualType VaListTagArrayType = Context->getConstantArrayType( 9019 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 9020 9021 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9022 } 9023 9024 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 9025 // typedef struct __va_list_tag { 9026 RecordDecl *VaListTagDecl; 9027 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9028 VaListTagDecl->startDefinition(); 9029 9030 const size_t NumFields = 3; 9031 QualType FieldTypes[NumFields]; 9032 const char *FieldNames[NumFields]; 9033 9034 // void *CurrentSavedRegisterArea; 9035 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 9036 FieldNames[0] = "__current_saved_reg_area_pointer"; 9037 9038 // void *SavedRegAreaEnd; 9039 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 9040 FieldNames[1] = "__saved_reg_area_end_pointer"; 9041 9042 // void *OverflowArea; 9043 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9044 FieldNames[2] = "__overflow_area_pointer"; 9045 9046 // Create fields 9047 for (unsigned i = 0; i < NumFields; ++i) { 9048 
FieldDecl *Field = FieldDecl::Create( 9049 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 9050 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 9051 /*TInfo=*/nullptr, 9052 /*BitWidth=*/nullptr, 9053 /*Mutable=*/false, ICIS_NoInit); 9054 Field->setAccess(AS_public); 9055 VaListTagDecl->addDecl(Field); 9056 } 9057 VaListTagDecl->completeDefinition(); 9058 Context->VaListTagDecl = VaListTagDecl; 9059 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9060 9061 // } __va_list_tag; 9062 TypedefDecl *VaListTagTypedefDecl = 9063 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 9064 9065 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 9066 9067 // typedef __va_list_tag __builtin_va_list[1]; 9068 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9069 QualType VaListTagArrayType = Context->getConstantArrayType( 9070 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 9071 9072 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9073 } 9074 9075 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 9076 TargetInfo::BuiltinVaListKind Kind) { 9077 switch (Kind) { 9078 case TargetInfo::CharPtrBuiltinVaList: 9079 return CreateCharPtrBuiltinVaListDecl(Context); 9080 case TargetInfo::VoidPtrBuiltinVaList: 9081 return CreateVoidPtrBuiltinVaListDecl(Context); 9082 case TargetInfo::AArch64ABIBuiltinVaList: 9083 return CreateAArch64ABIBuiltinVaListDecl(Context); 9084 case TargetInfo::PowerABIBuiltinVaList: 9085 return CreatePowerABIBuiltinVaListDecl(Context); 9086 case TargetInfo::X86_64ABIBuiltinVaList: 9087 return CreateX86_64ABIBuiltinVaListDecl(Context); 9088 case TargetInfo::PNaClABIBuiltinVaList: 9089 return CreatePNaClABIBuiltinVaListDecl(Context); 9090 case TargetInfo::AAPCSABIBuiltinVaList: 9091 return CreateAAPCSABIBuiltinVaListDecl(Context); 9092 case TargetInfo::SystemZBuiltinVaList: 9093 return CreateSystemZBuiltinVaListDecl(Context); 9094 case TargetInfo::HexagonBuiltinVaList: 9095 return CreateHexagonBuiltinVaListDecl(Context); 9096 } 9097 9098 llvm_unreachable("Unhandled __builtin_va_list type kind"); 9099 } 9100 9101 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 9102 if (!BuiltinVaListDecl) { 9103 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 9104 assert(BuiltinVaListDecl->isImplicit()); 9105 } 9106 9107 return BuiltinVaListDecl; 9108 } 9109 9110 Decl *ASTContext::getVaListTagDecl() const { 9111 // Force the creation of VaListTagDecl by building the __builtin_va_list 9112 // declaration. 9113 if (!VaListTagDecl) 9114 (void)getBuiltinVaListDecl(); 9115 9116 return VaListTagDecl; 9117 } 9118 9119 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 9120 if (!BuiltinMSVaListDecl) 9121 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 9122 9123 return BuiltinMSVaListDecl; 9124 } 9125 9126 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 9127 // Allow redecl custom type checking builtin for HLSL. 
9128 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin && 9129 BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID())) 9130 return true; 9131 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 9132 } 9133 9134 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 9135 assert(ObjCConstantStringType.isNull() && 9136 "'NSConstantString' type already set!"); 9137 9138 ObjCConstantStringType = getObjCInterfaceType(Decl); 9139 } 9140 9141 /// Retrieve the template name that corresponds to a non-empty 9142 /// lookup. 9143 TemplateName 9144 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 9145 UnresolvedSetIterator End) const { 9146 unsigned size = End - Begin; 9147 assert(size > 1 && "set is not overloaded!"); 9148 9149 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 9150 size * sizeof(FunctionTemplateDecl*)); 9151 auto *OT = new (memory) OverloadedTemplateStorage(size); 9152 9153 NamedDecl **Storage = OT->getStorage(); 9154 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 9155 NamedDecl *D = *I; 9156 assert(isa<FunctionTemplateDecl>(D) || 9157 isa<UnresolvedUsingValueDecl>(D) || 9158 (isa<UsingShadowDecl>(D) && 9159 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 9160 *Storage++ = D; 9161 } 9162 9163 return TemplateName(OT); 9164 } 9165 9166 /// Retrieve a template name representing an unqualified-id that has been 9167 /// assumed to name a template for ADL purposes. 9168 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 9169 auto *OT = new (*this) AssumedTemplateStorage(Name); 9170 return TemplateName(OT); 9171 } 9172 9173 /// Retrieve the template name that represents a qualified 9174 /// template name such as \c std::vector. 9175 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 9176 bool TemplateKeyword, 9177 TemplateName Template) const { 9178 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9179 9180 // FIXME: Canonicalization? 9181 llvm::FoldingSetNodeID ID; 9182 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9183 9184 void *InsertPos = nullptr; 9185 QualifiedTemplateName *QTN = 9186 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9187 if (!QTN) { 9188 QTN = new (*this, alignof(QualifiedTemplateName)) 9189 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9190 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9191 } 9192 9193 return TemplateName(QTN); 9194 } 9195 9196 /// Retrieve the template name that represents a dependent 9197 /// template name such as \c MetaFun::template apply. 
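/// For example (illustrative):
/// @code
///   template <typename MetaFun, typename T>
///   using apply_t = typename MetaFun::template apply<T>::type;
/// @endcode
/// Here \c MetaFun::template apply is a dependent template name and is
/// represented by the storage created below.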
9198 TemplateName 9199 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9200 const IdentifierInfo *Name) const { 9201 assert((!NNS || NNS->isDependent()) && 9202 "Nested name specifier must be dependent"); 9203 9204 llvm::FoldingSetNodeID ID; 9205 DependentTemplateName::Profile(ID, NNS, Name); 9206 9207 void *InsertPos = nullptr; 9208 DependentTemplateName *QTN = 9209 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9210 9211 if (QTN) 9212 return TemplateName(QTN); 9213 9214 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9215 if (CanonNNS == NNS) { 9216 QTN = new (*this, alignof(DependentTemplateName)) 9217 DependentTemplateName(NNS, Name); 9218 } else { 9219 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9220 QTN = new (*this, alignof(DependentTemplateName)) 9221 DependentTemplateName(NNS, Name, Canon); 9222 DependentTemplateName *CheckQTN = 9223 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9224 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9225 (void)CheckQTN; 9226 } 9227 9228 DependentTemplateNames.InsertNode(QTN, InsertPos); 9229 return TemplateName(QTN); 9230 } 9231 9232 /// Retrieve the template name that represents a dependent 9233 /// template name such as \c MetaFun::template operator+. 9234 TemplateName 9235 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9236 OverloadedOperatorKind Operator) const { 9237 assert((!NNS || NNS->isDependent()) && 9238 "Nested name specifier must be dependent"); 9239 9240 llvm::FoldingSetNodeID ID; 9241 DependentTemplateName::Profile(ID, NNS, Operator); 9242 9243 void *InsertPos = nullptr; 9244 DependentTemplateName *QTN 9245 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9246 9247 if (QTN) 9248 return TemplateName(QTN); 9249 9250 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9251 if (CanonNNS == NNS) { 9252 QTN = new (*this, alignof(DependentTemplateName)) 9253 DependentTemplateName(NNS, Operator); 9254 } else { 9255 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9256 QTN = new (*this, alignof(DependentTemplateName)) 9257 DependentTemplateName(NNS, Operator, Canon); 9258 9259 DependentTemplateName *CheckQTN 9260 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9261 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9262 (void)CheckQTN; 9263 } 9264 9265 DependentTemplateNames.InsertNode(QTN, InsertPos); 9266 return TemplateName(QTN); 9267 } 9268 9269 TemplateName ASTContext::getSubstTemplateTemplateParm( 9270 TemplateName Replacement, Decl *AssociatedDecl, unsigned Index, 9271 std::optional<unsigned> PackIndex) const { 9272 llvm::FoldingSetNodeID ID; 9273 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl, 9274 Index, PackIndex); 9275 9276 void *insertPos = nullptr; 9277 SubstTemplateTemplateParmStorage *subst 9278 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9279 9280 if (!subst) { 9281 subst = new (*this) SubstTemplateTemplateParmStorage( 9282 Replacement, AssociatedDecl, Index, PackIndex); 9283 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9284 } 9285 9286 return TemplateName(subst); 9287 } 9288 9289 TemplateName 9290 ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack, 9291 Decl *AssociatedDecl, 9292 unsigned Index, bool Final) const { 9293 auto &Self = const_cast<ASTContext &>(*this); 9294 llvm::FoldingSetNodeID ID; 9295 
SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack, 9296 AssociatedDecl, Index, Final); 9297 9298 void *InsertPos = nullptr; 9299 SubstTemplateTemplateParmPackStorage *Subst 9300 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 9301 9302 if (!Subst) { 9303 Subst = new (*this) SubstTemplateTemplateParmPackStorage( 9304 ArgPack.pack_elements(), AssociatedDecl, Index, Final); 9305 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 9306 } 9307 9308 return TemplateName(Subst); 9309 } 9310 9311 /// getFromTargetType - Given one of the integer types provided by 9312 /// TargetInfo, produce the corresponding type. The unsigned @p Type 9313 /// is actually a value of type @c TargetInfo::IntType. 9314 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 9315 switch (Type) { 9316 case TargetInfo::NoInt: return {}; 9317 case TargetInfo::SignedChar: return SignedCharTy; 9318 case TargetInfo::UnsignedChar: return UnsignedCharTy; 9319 case TargetInfo::SignedShort: return ShortTy; 9320 case TargetInfo::UnsignedShort: return UnsignedShortTy; 9321 case TargetInfo::SignedInt: return IntTy; 9322 case TargetInfo::UnsignedInt: return UnsignedIntTy; 9323 case TargetInfo::SignedLong: return LongTy; 9324 case TargetInfo::UnsignedLong: return UnsignedLongTy; 9325 case TargetInfo::SignedLongLong: return LongLongTy; 9326 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 9327 } 9328 9329 llvm_unreachable("Unhandled TargetInfo::IntType value"); 9330 } 9331 9332 //===----------------------------------------------------------------------===// 9333 // Type Predicates. 9334 //===----------------------------------------------------------------------===// 9335 9336 /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's 9337 /// garbage collection attribute. 9338 /// 9339 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 9340 if (getLangOpts().getGC() == LangOptions::NonGC) 9341 return Qualifiers::GCNone; 9342 9343 assert(getLangOpts().ObjC); 9344 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 9345 9346 // Default behaviour under objective-C's gc is for ObjC pointers 9347 // (or pointers to them) be treated as though they were declared 9348 // as __strong. 9349 if (GCAttrs == Qualifiers::GCNone) { 9350 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 9351 return Qualifiers::Strong; 9352 else if (Ty->isPointerType()) 9353 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 9354 } else { 9355 // It's not valid to set GC attributes on anything that isn't a 9356 // pointer. 9357 #ifndef NDEBUG 9358 QualType CT = Ty->getCanonicalTypeInternal(); 9359 while (const auto *AT = dyn_cast<ArrayType>(CT)) 9360 CT = AT->getElementType(); 9361 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 9362 #endif 9363 } 9364 return GCAttrs; 9365 } 9366 9367 //===----------------------------------------------------------------------===// 9368 // Type Compatibility Testing 9369 //===----------------------------------------------------------------------===// 9370 9371 /// areCompatVectorTypes - Return true if the two specified vector types are 9372 /// compatible. 
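/// Illustrative example (added, hedged): two GCC-style vector typedefs with
/// the same element type and element count compare as compatible here, even
/// though they are spelled through different typedefs:
/// \code
///   typedef int v4si_a __attribute__((vector_size(16)));
///   typedef int v4si_b __attribute__((vector_size(16)));  // also 4 x int
/// \endcode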
9373 static bool areCompatVectorTypes(const VectorType *LHS, 9374 const VectorType *RHS) { 9375 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9376 return LHS->getElementType() == RHS->getElementType() && 9377 LHS->getNumElements() == RHS->getNumElements(); 9378 } 9379 9380 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9381 /// compatible. 9382 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9383 const ConstantMatrixType *RHS) { 9384 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9385 return LHS->getElementType() == RHS->getElementType() && 9386 LHS->getNumRows() == RHS->getNumRows() && 9387 LHS->getNumColumns() == RHS->getNumColumns(); 9388 } 9389 9390 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9391 QualType SecondVec) { 9392 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9393 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9394 9395 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9396 return true; 9397 9398 // Treat Neon vector types and most AltiVec vector types as if they are the 9399 // equivalent GCC vector types. 9400 const auto *First = FirstVec->castAs<VectorType>(); 9401 const auto *Second = SecondVec->castAs<VectorType>(); 9402 if (First->getNumElements() == Second->getNumElements() && 9403 hasSameType(First->getElementType(), Second->getElementType()) && 9404 First->getVectorKind() != VectorKind::AltiVecPixel && 9405 First->getVectorKind() != VectorKind::AltiVecBool && 9406 Second->getVectorKind() != VectorKind::AltiVecPixel && 9407 Second->getVectorKind() != VectorKind::AltiVecBool && 9408 First->getVectorKind() != VectorKind::SveFixedLengthData && 9409 First->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9410 Second->getVectorKind() != VectorKind::SveFixedLengthData && 9411 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9412 First->getVectorKind() != VectorKind::RVVFixedLengthData && 9413 Second->getVectorKind() != VectorKind::RVVFixedLengthData) 9414 return true; 9415 9416 return false; 9417 } 9418 9419 /// getSVETypeSize - Return SVE vector or predicate register size. 9420 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9421 assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type"); 9422 if (Ty->getKind() == BuiltinType::SveBool || 9423 Ty->getKind() == BuiltinType::SveCount) 9424 return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth(); 9425 return Context.getLangOpts().VScaleMin * 128; 9426 } 9427 9428 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9429 QualType SecondType) { 9430 assert( 9431 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9432 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9433 "Expected SVE builtin type and vector type!"); 9434 9435 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9436 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9437 if (const auto *VT = SecondType->getAs<VectorType>()) { 9438 // Predicates have the same representation as uint8 so we also have to 9439 // check the kind to make these types incompatible. 
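// Illustrative note (added; assumes __ARM_FEATURE_SVE_BITS=N is in effect):
// both of the following fixed-length types are modeled as vectors of 8-bit
// elements, so only the vector kind distinguishes a fixed predicate from a
// fixed svuint8_t data vector:
//   typedef svbool_t  fixed_pred __attribute__((arm_sve_vector_bits(N)));
//   typedef svuint8_t fixed_u8   __attribute__((arm_sve_vector_bits(N)));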
9440 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 9441 return BT->getKind() == BuiltinType::SveBool; 9442 else if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 9443 return VT->getElementType().getCanonicalType() == 9444 FirstType->getSveEltType(*this); 9445 else if (VT->getVectorKind() == VectorKind::Generic) 9446 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9447 hasSameType(VT->getElementType(), 9448 getBuiltinVectorTypeInfo(BT).ElementType); 9449 } 9450 } 9451 return false; 9452 }; 9453 9454 return IsValidCast(FirstType, SecondType) || 9455 IsValidCast(SecondType, FirstType); 9456 } 9457 9458 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9459 QualType SecondType) { 9460 assert( 9461 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9462 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9463 "Expected SVE builtin type and vector type!"); 9464 9465 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9466 const auto *BT = FirstType->getAs<BuiltinType>(); 9467 if (!BT) 9468 return false; 9469 9470 const auto *VecTy = SecondType->getAs<VectorType>(); 9471 if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData || 9472 VecTy->getVectorKind() == VectorKind::Generic)) { 9473 const LangOptions::LaxVectorConversionKind LVCKind = 9474 getLangOpts().getLaxVectorConversions(); 9475 9476 // Can not convert between sve predicates and sve vectors because of 9477 // different size. 9478 if (BT->getKind() == BuiltinType::SveBool && 9479 VecTy->getVectorKind() == VectorKind::SveFixedLengthData) 9480 return false; 9481 9482 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9483 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9484 // converts to VLAT and VLAT implicitly converts to GNUT." 9485 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9486 // predicates. 9487 if (VecTy->getVectorKind() == VectorKind::Generic && 9488 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9489 return false; 9490 9491 // If -flax-vector-conversions=all is specified, the types are 9492 // certainly compatible. 9493 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9494 return true; 9495 9496 // If -flax-vector-conversions=integer is specified, the types are 9497 // compatible if the elements are integer types. 9498 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9499 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9500 FirstType->getSveEltType(*this)->isIntegerType(); 9501 } 9502 9503 return false; 9504 }; 9505 9506 return IsLaxCompatible(FirstType, SecondType) || 9507 IsLaxCompatible(SecondType, FirstType); 9508 } 9509 9510 /// getRVVTypeSize - Return RVV vector register size. 
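/// Worked example (added, hedged): assuming the target reports a minimum
/// vscale of 4 (e.g. a VLEN of at least 256 bits with the usual 64-bit RVV
/// granule), a 'vint32m1_t' with a known minimum of 2 elements of 32 bits
/// yields 4 * 2 * 32 = 256 bits, i.e. one full vector register.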
9511 static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) { 9512 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type"); 9513 auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts()); 9514 if (!VScale) 9515 return 0; 9516 9517 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty); 9518 9519 uint64_t EltSize = Context.getTypeSize(Info.ElementType); 9520 uint64_t MinElts = Info.EC.getKnownMinValue(); 9521 return VScale->first * MinElts * EltSize; 9522 } 9523 9524 bool ASTContext::areCompatibleRVVTypes(QualType FirstType, 9525 QualType SecondType) { 9526 assert( 9527 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9528 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9529 "Expected RVV builtin type and vector type!"); 9530 9531 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9532 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9533 if (const auto *VT = SecondType->getAs<VectorType>()) { 9534 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData || 9535 VT->getVectorKind() == VectorKind::Generic) 9536 return FirstType->isRVVVLSBuiltinType() && 9537 getTypeSize(SecondType) == getRVVTypeSize(*this, BT) && 9538 hasSameType(VT->getElementType(), 9539 getBuiltinVectorTypeInfo(BT).ElementType); 9540 } 9541 } 9542 return false; 9543 }; 9544 9545 return IsValidCast(FirstType, SecondType) || 9546 IsValidCast(SecondType, FirstType); 9547 } 9548 9549 bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType, 9550 QualType SecondType) { 9551 assert( 9552 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9553 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9554 "Expected RVV builtin type and vector type!"); 9555 9556 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9557 const auto *BT = FirstType->getAs<BuiltinType>(); 9558 if (!BT) 9559 return false; 9560 9561 if (!BT->isRVVVLSBuiltinType()) 9562 return false; 9563 9564 const auto *VecTy = SecondType->getAs<VectorType>(); 9565 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) { 9566 const LangOptions::LaxVectorConversionKind LVCKind = 9567 getLangOpts().getLaxVectorConversions(); 9568 9569 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion. 9570 if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT)) 9571 return false; 9572 9573 // If -flax-vector-conversions=all is specified, the types are 9574 // certainly compatible. 9575 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9576 return true; 9577 9578 // If -flax-vector-conversions=integer is specified, the types are 9579 // compatible if the elements are integer types. 9580 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9581 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9582 FirstType->getRVVEltType(*this)->isIntegerType(); 9583 } 9584 9585 return false; 9586 }; 9587 9588 return IsLaxCompatible(FirstType, SecondType) || 9589 IsLaxCompatible(SecondType, FirstType); 9590 } 9591 9592 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9593 while (true) { 9594 // __strong id 9595 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9596 if (Attr->getAttrKind() == attr::ObjCOwnership) 9597 return true; 9598 9599 Ty = Attr->getModifiedType(); 9600 9601 // X *__strong (...) 
9602 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9603 Ty = Paren->getInnerType(); 9604 9605 // We do not want to look through typedefs, typeof(expr), 9606 // typeof(type), or any other way that the type is somehow 9607 // abstracted. 9608 } else { 9609 return false; 9610 } 9611 } 9612 } 9613 9614 //===----------------------------------------------------------------------===// 9615 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9616 //===----------------------------------------------------------------------===// 9617 9618 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9619 /// inheritance hierarchy of 'rProto'. 9620 bool 9621 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9622 ObjCProtocolDecl *rProto) const { 9623 if (declaresSameEntity(lProto, rProto)) 9624 return true; 9625 for (auto *PI : rProto->protocols()) 9626 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9627 return true; 9628 return false; 9629 } 9630 9631 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9632 /// Class<pr1, ...>. 9633 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9634 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9635 for (auto *lhsProto : lhs->quals()) { 9636 bool match = false; 9637 for (auto *rhsProto : rhs->quals()) { 9638 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9639 match = true; 9640 break; 9641 } 9642 } 9643 if (!match) 9644 return false; 9645 } 9646 return true; 9647 } 9648 9649 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9650 /// ObjCQualifiedIDType. 9651 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9652 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9653 bool compare) { 9654 // Allow id<P..> and an 'id' in all cases. 9655 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9656 return true; 9657 9658 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9659 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9660 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9661 return false; 9662 9663 if (lhs->isObjCQualifiedIdType()) { 9664 if (rhs->qual_empty()) { 9665 // If the RHS is a unqualified interface pointer "NSString*", 9666 // make sure we check the class hierarchy. 9667 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9668 for (auto *I : lhs->quals()) { 9669 // when comparing an id<P> on lhs with a static type on rhs, 9670 // see if static class implements all of id's protocols, directly or 9671 // through its super class and categories. 9672 if (!rhsID->ClassImplementsProtocol(I, true)) 9673 return false; 9674 } 9675 } 9676 // If there are no qualifiers and no interface, we have an 'id'. 9677 return true; 9678 } 9679 // Both the right and left sides have qualifiers. 9680 for (auto *lhsProto : lhs->quals()) { 9681 bool match = false; 9682 9683 // when comparing an id<P> on lhs with a static type on rhs, 9684 // see if static class implements all of id's protocols, directly or 9685 // through its super class and categories. 9686 for (auto *rhsProto : rhs->quals()) { 9687 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9688 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9689 match = true; 9690 break; 9691 } 9692 } 9693 // If the RHS is a qualified interface pointer "NSString<P>*", 9694 // make sure we check the class hierarchy. 
9695 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9696 for (auto *I : lhs->quals()) { 9697 // when comparing an id<P> on lhs with a static type on rhs, 9698 // see if static class implements all of id's protocols, directly or 9699 // through its super class and categories. 9700 if (rhsID->ClassImplementsProtocol(I, true)) { 9701 match = true; 9702 break; 9703 } 9704 } 9705 } 9706 if (!match) 9707 return false; 9708 } 9709 9710 return true; 9711 } 9712 9713 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9714 9715 if (lhs->getInterfaceType()) { 9716 // If both the right and left sides have qualifiers. 9717 for (auto *lhsProto : lhs->quals()) { 9718 bool match = false; 9719 9720 // when comparing an id<P> on rhs with a static type on lhs, 9721 // see if static class implements all of id's protocols, directly or 9722 // through its super class and categories. 9723 // First, lhs protocols in the qualifier list must be found, direct 9724 // or indirect in rhs's qualifier list or it is a mismatch. 9725 for (auto *rhsProto : rhs->quals()) { 9726 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9727 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9728 match = true; 9729 break; 9730 } 9731 } 9732 if (!match) 9733 return false; 9734 } 9735 9736 // Static class's protocols, or its super class or category protocols 9737 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9738 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9739 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9740 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9741 // This is rather dubious but matches gcc's behavior. If lhs has 9742 // no type qualifier and its class has no static protocol(s) 9743 // assume that it is mismatch. 9744 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9745 return false; 9746 for (auto *lhsProto : LHSInheritedProtocols) { 9747 bool match = false; 9748 for (auto *rhsProto : rhs->quals()) { 9749 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9750 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9751 match = true; 9752 break; 9753 } 9754 } 9755 if (!match) 9756 return false; 9757 } 9758 } 9759 return true; 9760 } 9761 return false; 9762 } 9763 9764 /// canAssignObjCInterfaces - Return true if the two interface types are 9765 /// compatible for assignment from RHS to LHS. This handles validation of any 9766 /// protocol qualifiers on the LHS or RHS. 9767 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9768 const ObjCObjectPointerType *RHSOPT) { 9769 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9770 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9771 9772 // If either type represents the built-in 'id' type, return true. 9773 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9774 return true; 9775 9776 // Function object that propagates a successful result or handles 9777 // __kindof types. 9778 auto finish = [&](bool succeeded) -> bool { 9779 if (succeeded) 9780 return true; 9781 9782 if (!RHS->isKindOfType()) 9783 return false; 9784 9785 // Strip off __kindof and protocol qualifiers, then check whether 9786 // we can assign the other way. 9787 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9788 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9789 }; 9790 9791 // Casts from or to id<P> are allowed when the other side has compatible 9792 // protocols. 
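// Illustrative Objective-C example (added, hedged): an assignment such as
//   id<NSCopying> c = someNSString;   // OK iff NSString adopts NSCopying
// is accepted only when the concrete class conforms, directly or through its
// superclasses and categories, to every protocol named on the qualified 'id'.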
9793 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) {
9794 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false));
9795 }
9796
9797 // Verify protocol compatibility for casts from Class<P1> to Class<P2>.
9798 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) {
9799 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT));
9800 }
9801
9802 // Casts from Class to Class<Foo>, or vice-versa, are allowed.
9803 if (LHS->isObjCClass() && RHS->isObjCClass()) {
9804 return true;
9805 }
9806
9807 // If we have 2 user-defined types, fall into that path.
9808 if (LHS->getInterface() && RHS->getInterface()) {
9809 return finish(canAssignObjCInterfaces(LHS, RHS));
9810 }
9811
9812 return false;
9813 }
9814
9815 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
9816 /// for providing type-safety for Objective-C pointers used to pass/return
9817 /// arguments in block literals. When passed as arguments, passing 'A*' where
9818 /// 'id' is expected is not OK. Passing 'Sub *' where 'Super *' is expected is
9819 /// not OK. For the return type, the opposite direction is the one that is not OK.
9820 bool ASTContext::canAssignObjCInterfacesInBlockPointer(
9821 const ObjCObjectPointerType *LHSOPT,
9822 const ObjCObjectPointerType *RHSOPT,
9823 bool BlockReturnType) {
9824
9825 // Function object that propagates a successful result or handles
9826 // __kindof types.
9827 auto finish = [&](bool succeeded) -> bool {
9828 if (succeeded)
9829 return true;
9830
9831 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT;
9832 if (!Expected->isKindOfType())
9833 return false;
9834
9835 // Strip off __kindof and protocol qualifiers, then check whether
9836 // we can assign the other way.
9837 return canAssignObjCInterfacesInBlockPointer(
9838 RHSOPT->stripObjCKindOfTypeAndQuals(*this),
9839 LHSOPT->stripObjCKindOfTypeAndQuals(*this),
9840 BlockReturnType);
9841 };
9842
9843 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
9844 return true;
9845
9846 if (LHSOPT->isObjCBuiltinType()) {
9847 return finish(RHSOPT->isObjCBuiltinType() ||
9848 RHSOPT->isObjCQualifiedIdType());
9849 }
9850
9851 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) {
9852 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking)
9853 // For block parameters, use the previous type checking for compatibility.
9854 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) ||
9855 // Or corrected type checking as in non-compat mode.
9856 (!BlockReturnType &&
9857 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false)));
9858 else
9859 return finish(ObjCQualifiedIdTypesAreCompatible(
9860 (BlockReturnType ? LHSOPT : RHSOPT),
9861 (BlockReturnType ? RHSOPT : LHSOPT), false));
9862 }
9863
9864 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
9865 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
9866 if (LHS && RHS) { // We have 2 user-defined types.
9867 if (LHS != RHS) {
9868 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
9869 return finish(BlockReturnType);
9870 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
9871 return finish(!BlockReturnType);
9872 }
9873 else
9874 return true;
9875 }
9876 return false;
9877 }
9878
9879 /// Comparison routine for Objective-C protocols to be used with
9880 /// llvm::array_pod_sort.
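/// Usage sketch (added; mirrors the call made in getIntersectionOfProtocols
/// below):
/// \code
///   SmallVector<ObjCProtocolDecl *, 8> Protocols = ...;
///   llvm::array_pod_sort(Protocols.begin(), Protocols.end(),
///                        compareObjCProtocolsByName);
/// \endcode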
9881 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9882 ObjCProtocolDecl * const *rhs) { 9883 return (*lhs)->getName().compare((*rhs)->getName()); 9884 } 9885 9886 /// getIntersectionOfProtocols - This routine finds the intersection of set 9887 /// of protocols inherited from two distinct objective-c pointer objects with 9888 /// the given common base. 9889 /// It is used to build composite qualifier list of the composite type of 9890 /// the conditional expression involving two objective-c pointer objects. 9891 static 9892 void getIntersectionOfProtocols(ASTContext &Context, 9893 const ObjCInterfaceDecl *CommonBase, 9894 const ObjCObjectPointerType *LHSOPT, 9895 const ObjCObjectPointerType *RHSOPT, 9896 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9897 9898 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9899 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9900 assert(LHS->getInterface() && "LHS must have an interface base"); 9901 assert(RHS->getInterface() && "RHS must have an interface base"); 9902 9903 // Add all of the protocols for the LHS. 9904 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9905 9906 // Start with the protocol qualifiers. 9907 for (auto *proto : LHS->quals()) { 9908 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9909 } 9910 9911 // Also add the protocols associated with the LHS interface. 9912 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9913 9914 // Add all of the protocols for the RHS. 9915 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9916 9917 // Start with the protocol qualifiers. 9918 for (auto *proto : RHS->quals()) { 9919 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9920 } 9921 9922 // Also add the protocols associated with the RHS interface. 9923 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9924 9925 // Compute the intersection of the collected protocol sets. 9926 for (auto *proto : LHSProtocolSet) { 9927 if (RHSProtocolSet.count(proto)) 9928 IntersectionSet.push_back(proto); 9929 } 9930 9931 // Compute the set of protocols that is implied by either the common type or 9932 // the protocols within the intersection. 9933 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9934 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9935 9936 // Remove any implied protocols from the list of inherited protocols. 9937 if (!ImpliedProtocols.empty()) { 9938 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9939 return ImpliedProtocols.contains(proto); 9940 }); 9941 } 9942 9943 // Sort the remaining protocols by name. 9944 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9945 compareObjCProtocolsByName); 9946 } 9947 9948 /// Determine whether the first type is a subtype of the second. 9949 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9950 QualType rhs) { 9951 // Common case: two object pointers. 9952 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9953 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9954 if (lhsOPT && rhsOPT) 9955 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9956 9957 // Two block pointers. 9958 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9959 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9960 if (lhsBlock && rhsBlock) 9961 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9962 9963 // If either is an unqualified 'id' and the other is a block, it's 9964 // acceptable. 
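// For instance (added note): assigning a block literal to a variable of type
// 'id' is accepted, since blocks are Objective-C objects.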
9965 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9966 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9967 return true; 9968 9969 return false; 9970 } 9971 9972 // Check that the given Objective-C type argument lists are equivalent. 9973 static bool sameObjCTypeArgs(ASTContext &ctx, 9974 const ObjCInterfaceDecl *iface, 9975 ArrayRef<QualType> lhsArgs, 9976 ArrayRef<QualType> rhsArgs, 9977 bool stripKindOf) { 9978 if (lhsArgs.size() != rhsArgs.size()) 9979 return false; 9980 9981 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9982 if (!typeParams) 9983 return false; 9984 9985 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9986 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9987 continue; 9988 9989 switch (typeParams->begin()[i]->getVariance()) { 9990 case ObjCTypeParamVariance::Invariant: 9991 if (!stripKindOf || 9992 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9993 rhsArgs[i].stripObjCKindOfType(ctx))) { 9994 return false; 9995 } 9996 break; 9997 9998 case ObjCTypeParamVariance::Covariant: 9999 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 10000 return false; 10001 break; 10002 10003 case ObjCTypeParamVariance::Contravariant: 10004 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 10005 return false; 10006 break; 10007 } 10008 } 10009 10010 return true; 10011 } 10012 10013 QualType ASTContext::areCommonBaseCompatible( 10014 const ObjCObjectPointerType *Lptr, 10015 const ObjCObjectPointerType *Rptr) { 10016 const ObjCObjectType *LHS = Lptr->getObjectType(); 10017 const ObjCObjectType *RHS = Rptr->getObjectType(); 10018 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 10019 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 10020 10021 if (!LDecl || !RDecl) 10022 return {}; 10023 10024 // When either LHS or RHS is a kindof type, we should return a kindof type. 10025 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 10026 // kindof(A). 10027 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 10028 10029 // Follow the left-hand side up the class hierarchy until we either hit a 10030 // root or find the RHS. Record the ancestors in case we don't find it. 10031 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 10032 LHSAncestors; 10033 while (true) { 10034 // Record this ancestor. We'll need this if the common type isn't in the 10035 // path from the LHS to the root. 10036 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 10037 10038 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 10039 // Get the type arguments. 10040 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 10041 bool anyChanges = false; 10042 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10043 // Both have type arguments, compare them. 10044 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10045 LHS->getTypeArgs(), RHS->getTypeArgs(), 10046 /*stripKindOf=*/true)) 10047 return {}; 10048 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10049 // If only one has type arguments, the result will not have type 10050 // arguments. 10051 LHSTypeArgs = {}; 10052 anyChanges = true; 10053 } 10054 10055 // Compute the intersection of protocols. 10056 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10057 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 10058 Protocols); 10059 if (!Protocols.empty()) 10060 anyChanges = true; 10061 10062 // If anything in the LHS will have changed, build a new result type. 
10063 // If we need to return a kindof type but LHS is not a kindof type, we 10064 // build a new result type. 10065 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 10066 QualType Result = getObjCInterfaceType(LHS->getInterface()); 10067 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 10068 anyKindOf || LHS->isKindOfType()); 10069 return getObjCObjectPointerType(Result); 10070 } 10071 10072 return getObjCObjectPointerType(QualType(LHS, 0)); 10073 } 10074 10075 // Find the superclass. 10076 QualType LHSSuperType = LHS->getSuperClassType(); 10077 if (LHSSuperType.isNull()) 10078 break; 10079 10080 LHS = LHSSuperType->castAs<ObjCObjectType>(); 10081 } 10082 10083 // We didn't find anything by following the LHS to its root; now check 10084 // the RHS against the cached set of ancestors. 10085 while (true) { 10086 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 10087 if (KnownLHS != LHSAncestors.end()) { 10088 LHS = KnownLHS->second; 10089 10090 // Get the type arguments. 10091 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 10092 bool anyChanges = false; 10093 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10094 // Both have type arguments, compare them. 10095 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10096 LHS->getTypeArgs(), RHS->getTypeArgs(), 10097 /*stripKindOf=*/true)) 10098 return {}; 10099 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10100 // If only one has type arguments, the result will not have type 10101 // arguments. 10102 RHSTypeArgs = {}; 10103 anyChanges = true; 10104 } 10105 10106 // Compute the intersection of protocols. 10107 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10108 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 10109 Protocols); 10110 if (!Protocols.empty()) 10111 anyChanges = true; 10112 10113 // If we need to return a kindof type but RHS is not a kindof type, we 10114 // build a new result type. 10115 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 10116 QualType Result = getObjCInterfaceType(RHS->getInterface()); 10117 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 10118 anyKindOf || RHS->isKindOfType()); 10119 return getObjCObjectPointerType(Result); 10120 } 10121 10122 return getObjCObjectPointerType(QualType(RHS, 0)); 10123 } 10124 10125 // Find the superclass of the RHS. 10126 QualType RHSSuperType = RHS->getSuperClassType(); 10127 if (RHSSuperType.isNull()) 10128 break; 10129 10130 RHS = RHSSuperType->castAs<ObjCObjectType>(); 10131 } 10132 10133 return {}; 10134 } 10135 10136 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 10137 const ObjCObjectType *RHS) { 10138 assert(LHS->getInterface() && "LHS is not an interface type"); 10139 assert(RHS->getInterface() && "RHS is not an interface type"); 10140 10141 // Verify that the base decls are compatible: the RHS must be a subclass of 10142 // the LHS. 10143 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 10144 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 10145 if (!IsSuperClass) 10146 return false; 10147 10148 // If the LHS has protocol qualifiers, determine whether all of them are 10149 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 10150 // LHS). 10151 if (LHS->getNumProtocols() > 0) { 10152 // OK if conversion of LHS to SuperClass results in narrowing of types 10153 // ; i.e., SuperClass may implement at least one of the protocols 10154 // in LHS's protocol list. 
// Example: SuperObj<P1> = lhs<P1,P2> is OK.
10155 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
10156 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
10157 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
10158 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's
10159 // qualifiers.
10160 for (auto *RHSPI : RHS->quals())
10161 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
10162 // If there are no protocols associated with RHS, it is not a match.
10163 if (SuperClassInheritedProtocols.empty())
10164 return false;
10165
10166 for (const auto *LHSProto : LHS->quals()) {
10167 bool SuperImplementsProtocol = false;
10168 for (auto *SuperClassProto : SuperClassInheritedProtocols)
10169 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
10170 SuperImplementsProtocol = true;
10171 break;
10172 }
10173 if (!SuperImplementsProtocol)
10174 return false;
10175 }
10176 }
10177
10178 // If the LHS is specialized, we may need to check type arguments.
10179 if (LHS->isSpecialized()) {
10180 // Follow the superclass chain until we've matched the LHS class in the
10181 // hierarchy. This substitutes type arguments through.
10182 const ObjCObjectType *RHSSuper = RHS;
10183 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
10184 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
10185
10186 // If the RHS is specialized, compare type arguments.
10187 if (RHSSuper->isSpecialized() &&
10188 !sameObjCTypeArgs(*this, LHS->getInterface(),
10189 LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
10190 /*stripKindOf=*/true)) {
10191 return false;
10192 }
10193 }
10194
10195 return true;
10196 }
10197
10198 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
10199 // Get the "pointed to" types.
10200 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
10201 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
10202
10203 if (!LHSOPT || !RHSOPT)
10204 return false;
10205
10206 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
10207 canAssignObjCInterfaces(RHSOPT, LHSOPT);
10208 }
10209
10210 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
10211 return canAssignObjCInterfaces(
10212 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
10213 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
10214 }
10215
10216 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
10217 /// both shall have the identically qualified version of a compatible type.
10218 /// C99 6.2.7p1: Two types have compatible types if their types are the
10219 /// same. See 6.7.[2,3,5] for additional rules.
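/// Illustrative C example (added, not part of the C99 wording): the two
/// declarations below are compatible because an array of unspecified bound
/// is compatible with an array of known bound of the same element type:
/// \code
///   extern int a[];
///   extern int a[10];   // OK in C: the composite type is int[10]
/// \endcode
/// By contrast, 'int' and 'long' remain distinct types even on targets where
/// they have the same width.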
10220 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 10221 bool CompareUnqualified) { 10222 if (getLangOpts().CPlusPlus) 10223 return hasSameType(LHS, RHS); 10224 10225 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 10226 } 10227 10228 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 10229 return typesAreCompatible(LHS, RHS); 10230 } 10231 10232 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 10233 return !mergeTypes(LHS, RHS, true).isNull(); 10234 } 10235 10236 /// mergeTransparentUnionType - if T is a transparent union type and a member 10237 /// of T is compatible with SubType, return the merged type, else return 10238 /// QualType() 10239 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 10240 bool OfBlockPointer, 10241 bool Unqualified) { 10242 if (const RecordType *UT = T->getAsUnionType()) { 10243 RecordDecl *UD = UT->getDecl(); 10244 if (UD->hasAttr<TransparentUnionAttr>()) { 10245 for (const auto *I : UD->fields()) { 10246 QualType ET = I->getType().getUnqualifiedType(); 10247 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 10248 if (!MT.isNull()) 10249 return MT; 10250 } 10251 } 10252 } 10253 10254 return {}; 10255 } 10256 10257 /// mergeFunctionParameterTypes - merge two types which appear as function 10258 /// parameter types 10259 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 10260 bool OfBlockPointer, 10261 bool Unqualified) { 10262 // GNU extension: two types are compatible if they appear as a function 10263 // argument, one of the types is a transparent union type and the other 10264 // type is compatible with a union member 10265 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 10266 Unqualified); 10267 if (!lmerge.isNull()) 10268 return lmerge; 10269 10270 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10271 Unqualified); 10272 if (!rmerge.isNull()) 10273 return rmerge; 10274 10275 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10276 } 10277 10278 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10279 bool OfBlockPointer, bool Unqualified, 10280 bool AllowCXX, 10281 bool IsConditionalOperator) { 10282 const auto *lbase = lhs->castAs<FunctionType>(); 10283 const auto *rbase = rhs->castAs<FunctionType>(); 10284 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10285 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10286 bool allLTypes = true; 10287 bool allRTypes = true; 10288 10289 // Check return type 10290 QualType retType; 10291 if (OfBlockPointer) { 10292 QualType RHS = rbase->getReturnType(); 10293 QualType LHS = lbase->getReturnType(); 10294 bool UnqualifiedResult = Unqualified; 10295 if (!UnqualifiedResult) 10296 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10297 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10298 } 10299 else 10300 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10301 Unqualified); 10302 if (retType.isNull()) 10303 return {}; 10304 10305 if (Unqualified) 10306 retType = retType.getUnqualifiedType(); 10307 10308 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10309 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10310 if (Unqualified) { 10311 LRetType = LRetType.getUnqualifiedType(); 10312 RRetType = RRetType.getUnqualifiedType(); 10313 } 10314 10315 if (getCanonicalType(retType) != LRetType) 
10316 allLTypes = false; 10317 if (getCanonicalType(retType) != RRetType) 10318 allRTypes = false; 10319 10320 // FIXME: double check this 10321 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10322 // rbase->getRegParmAttr() != 0 && 10323 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10324 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10325 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10326 10327 // Compatible functions must have compatible calling conventions 10328 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10329 return {}; 10330 10331 // Regparm is part of the calling convention. 10332 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10333 return {}; 10334 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10335 return {}; 10336 10337 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10338 return {}; 10339 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10340 return {}; 10341 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10342 return {}; 10343 10344 // When merging declarations, it's common for supplemental information like 10345 // attributes to only be present in one of the declarations, and we generally 10346 // want type merging to preserve the union of information. So a merged 10347 // function type should be noreturn if it was noreturn in *either* operand 10348 // type. 10349 // 10350 // But for the conditional operator, this is backwards. The result of the 10351 // operator could be either operand, and its type should conservatively 10352 // reflect that. So a function type in a composite type is noreturn only 10353 // if it's noreturn in *both* operand types. 10354 // 10355 // Arguably, noreturn is a kind of subtype, and the conditional operator 10356 // ought to produce the most specific common supertype of its operand types. 10357 // That would differ from this rule in contravariant positions. However, 10358 // neither C nor C++ generally uses this kind of subtype reasoning. Also, 10359 // as a practical matter, it would only affect C code that does abstraction of 10360 // higher-order functions (taking noreturn callbacks!), which is uncommon to 10361 // say the least. So we use the simpler rule. 10362 bool NoReturn = IsConditionalOperator 10363 ? 
lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn() 10364 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10365 if (lbaseInfo.getNoReturn() != NoReturn) 10366 allLTypes = false; 10367 if (rbaseInfo.getNoReturn() != NoReturn) 10368 allRTypes = false; 10369 10370 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10371 10372 if (lproto && rproto) { // two C99 style function prototypes 10373 assert((AllowCXX || 10374 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10375 "C++ shouldn't be here"); 10376 // Compatible functions must have the same number of parameters 10377 if (lproto->getNumParams() != rproto->getNumParams()) 10378 return {}; 10379 10380 // Variadic and non-variadic functions aren't compatible 10381 if (lproto->isVariadic() != rproto->isVariadic()) 10382 return {}; 10383 10384 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10385 return {}; 10386 10387 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10388 bool canUseLeft, canUseRight; 10389 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10390 newParamInfos)) 10391 return {}; 10392 10393 if (!canUseLeft) 10394 allLTypes = false; 10395 if (!canUseRight) 10396 allRTypes = false; 10397 10398 // Check parameter type compatibility 10399 SmallVector<QualType, 10> types; 10400 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10401 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10402 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10403 QualType paramType = mergeFunctionParameterTypes( 10404 lParamType, rParamType, OfBlockPointer, Unqualified); 10405 if (paramType.isNull()) 10406 return {}; 10407 10408 if (Unqualified) 10409 paramType = paramType.getUnqualifiedType(); 10410 10411 types.push_back(paramType); 10412 if (Unqualified) { 10413 lParamType = lParamType.getUnqualifiedType(); 10414 rParamType = rParamType.getUnqualifiedType(); 10415 } 10416 10417 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10418 allLTypes = false; 10419 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10420 allRTypes = false; 10421 } 10422 10423 if (allLTypes) return lhs; 10424 if (allRTypes) return rhs; 10425 10426 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10427 EPI.ExtInfo = einfo; 10428 EPI.ExtParameterInfos = 10429 newParamInfos.empty() ? nullptr : newParamInfos.data(); 10430 return getFunctionType(retType, types, EPI); 10431 } 10432 10433 if (lproto) allRTypes = false; 10434 if (rproto) allLTypes = false; 10435 10436 const FunctionProtoType *proto = lproto ? lproto : rproto; 10437 if (proto) { 10438 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10439 if (proto->isVariadic()) 10440 return {}; 10441 // Check that the types are compatible with the types that 10442 // would result from default argument promotions (C99 6.7.5.3p15). 10443 // The only types actually affected are promotable integer 10444 // types and floats, which would be passed as a different 10445 // type depending on whether the prototype is visible. 10446 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10447 QualType paramTy = proto->getParamType(i); 10448 10449 // Look at the converted type of enum types, since that is the type used 10450 // to pass enum values. 
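// Illustrative note (added, hedged): with packed enums (e.g. -fshort-enums)
// an enum's underlying type can be a promotable type such as 'unsigned char';
// in that case 'void f();' cannot be merged with 'void f(enum E);', because
// an unprototyped call would pass the promoted 'int' instead.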
10451 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10452 paramTy = Enum->getDecl()->getIntegerType(); 10453 if (paramTy.isNull()) 10454 return {}; 10455 } 10456 10457 if (isPromotableIntegerType(paramTy) || 10458 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10459 return {}; 10460 } 10461 10462 if (allLTypes) return lhs; 10463 if (allRTypes) return rhs; 10464 10465 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10466 EPI.ExtInfo = einfo; 10467 return getFunctionType(retType, proto->getParamTypes(), EPI); 10468 } 10469 10470 if (allLTypes) return lhs; 10471 if (allRTypes) return rhs; 10472 return getFunctionNoProtoType(retType, einfo); 10473 } 10474 10475 /// Given that we have an enum type and a non-enum type, try to merge them. 10476 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10477 QualType other, bool isBlockReturnType) { 10478 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10479 // a signed integer type, or an unsigned integer type. 10480 // Compatibility is based on the underlying type, not the promotion 10481 // type. 10482 QualType underlyingType = ET->getDecl()->getIntegerType(); 10483 if (underlyingType.isNull()) 10484 return {}; 10485 if (Context.hasSameType(underlyingType, other)) 10486 return other; 10487 10488 // In block return types, we're more permissive and accept any 10489 // integral type of the same size. 10490 if (isBlockReturnType && other->isIntegerType() && 10491 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10492 return other; 10493 10494 return {}; 10495 } 10496 10497 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, 10498 bool Unqualified, bool BlockReturnType, 10499 bool IsConditionalOperator) { 10500 // For C++ we will not reach this code with reference types (see below), 10501 // for OpenMP variant call overloading we might. 10502 // 10503 // C++ [expr]: If an expression initially has the type "reference to T", the 10504 // type is adjusted to "T" prior to any further analysis, the expression 10505 // designates the object or function denoted by the reference, and the 10506 // expression is an lvalue unless the reference is an rvalue reference and 10507 // the expression is a function call (possibly inside parentheses). 10508 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10509 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10510 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10511 LHS->getTypeClass() == RHS->getTypeClass()) 10512 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10513 OfBlockPointer, Unqualified, BlockReturnType); 10514 if (LHSRefTy || RHSRefTy) 10515 return {}; 10516 10517 if (Unqualified) { 10518 LHS = LHS.getUnqualifiedType(); 10519 RHS = RHS.getUnqualifiedType(); 10520 } 10521 10522 QualType LHSCan = getCanonicalType(LHS), 10523 RHSCan = getCanonicalType(RHS); 10524 10525 // If two types are identical, they are compatible. 10526 if (LHSCan == RHSCan) 10527 return LHS; 10528 10529 // If the qualifiers are different, the types aren't compatible... mostly. 10530 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10531 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10532 if (LQuals != RQuals) { 10533 // If any of these qualifiers are different, we have a type 10534 // mismatch. 
10535 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10536 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10537 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10538 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10539 return {}; 10540 10541 // Exactly one GC qualifier difference is allowed: __strong is 10542 // okay if the other type has no GC qualifier but is an Objective 10543 // C object pointer (i.e. implicitly strong by default). We fix 10544 // this by pretending that the unqualified type was actually 10545 // qualified __strong. 10546 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10547 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10548 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10549 10550 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10551 return {}; 10552 10553 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10554 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10555 } 10556 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10557 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10558 } 10559 return {}; 10560 } 10561 10562 // Okay, qualifiers are equal. 10563 10564 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10565 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10566 10567 // We want to consider the two function types to be the same for these 10568 // comparisons, just force one to the other. 10569 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10570 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10571 10572 // Same as above for arrays 10573 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10574 LHSClass = Type::ConstantArray; 10575 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10576 RHSClass = Type::ConstantArray; 10577 10578 // ObjCInterfaces are just specialized ObjCObjects. 10579 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10580 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10581 10582 // Canonicalize ExtVector -> Vector. 10583 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10584 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10585 10586 // If the canonical type classes don't match. 10587 if (LHSClass != RHSClass) { 10588 // Note that we only have special rules for turning block enum 10589 // returns into block int returns, not vice-versa. 10590 if (const auto *ETy = LHS->getAs<EnumType>()) { 10591 return mergeEnumWithInteger(*this, ETy, RHS, false); 10592 } 10593 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10594 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10595 } 10596 // allow block pointer type to match an 'id' type. 10597 if (OfBlockPointer && !BlockReturnType) { 10598 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10599 return LHS; 10600 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10601 return RHS; 10602 } 10603 // Allow __auto_type to match anything; it merges to the type with more 10604 // information. 10605 if (const auto *AT = LHS->getAs<AutoType>()) { 10606 if (!AT->isDeduced() && AT->isGNUAutoType()) 10607 return RHS; 10608 } 10609 if (const auto *AT = RHS->getAs<AutoType>()) { 10610 if (!AT->isDeduced() && AT->isGNUAutoType()) 10611 return LHS; 10612 } 10613 return {}; 10614 } 10615 10616 // The canonical type classes match. 
10617 switch (LHSClass) { 10618 #define TYPE(Class, Base) 10619 #define ABSTRACT_TYPE(Class, Base) 10620 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10621 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10622 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10623 #include "clang/AST/TypeNodes.inc" 10624 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10625 10626 case Type::Auto: 10627 case Type::DeducedTemplateSpecialization: 10628 case Type::LValueReference: 10629 case Type::RValueReference: 10630 case Type::MemberPointer: 10631 llvm_unreachable("C++ should never be in mergeTypes"); 10632 10633 case Type::ObjCInterface: 10634 case Type::IncompleteArray: 10635 case Type::VariableArray: 10636 case Type::FunctionProto: 10637 case Type::ExtVector: 10638 llvm_unreachable("Types are eliminated above"); 10639 10640 case Type::Pointer: 10641 { 10642 // Merge two pointer types, while trying to preserve typedef info 10643 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10644 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10645 if (Unqualified) { 10646 LHSPointee = LHSPointee.getUnqualifiedType(); 10647 RHSPointee = RHSPointee.getUnqualifiedType(); 10648 } 10649 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10650 Unqualified); 10651 if (ResultType.isNull()) 10652 return {}; 10653 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10654 return LHS; 10655 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10656 return RHS; 10657 return getPointerType(ResultType); 10658 } 10659 case Type::BlockPointer: 10660 { 10661 // Merge two block pointer types, while trying to preserve typedef info 10662 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10663 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10664 if (Unqualified) { 10665 LHSPointee = LHSPointee.getUnqualifiedType(); 10666 RHSPointee = RHSPointee.getUnqualifiedType(); 10667 } 10668 if (getLangOpts().OpenCL) { 10669 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10670 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10671 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10672 // 6.12.5) thus the following check is asymmetric. 
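// Illustrative note (added, hedged): for example, a __generic pointee on the
// LHS may absorb a __private pointee on the RHS, because __generic is an
// address-space superset of __private; the reverse direction is rejected,
// which is why only the LHS is required to be a superset here.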
10673 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual))
10674 return {};
10675 LHSPteeQual.removeAddressSpace();
10676 RHSPteeQual.removeAddressSpace();
10677 LHSPointee =
10678 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
10679 RHSPointee =
10680 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
10681 }
10682 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
10683 Unqualified);
10684 if (ResultType.isNull())
10685 return {};
10686 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
10687 return LHS;
10688 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
10689 return RHS;
10690 return getBlockPointerType(ResultType);
10691 }
10692 case Type::Atomic:
10693 {
10694 // Merge two atomic types, while trying to preserve typedef info.
10695 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType();
10696 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType();
10697 if (Unqualified) {
10698 LHSValue = LHSValue.getUnqualifiedType();
10699 RHSValue = RHSValue.getUnqualifiedType();
10700 }
10701 QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
10702 Unqualified);
10703 if (ResultType.isNull())
10704 return {};
10705 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
10706 return LHS;
10707 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
10708 return RHS;
10709 return getAtomicType(ResultType);
10710 }
10711 case Type::ConstantArray:
10712 {
10713 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
10714 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
10715 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
10716 return {};
10717
10718 QualType LHSElem = getAsArrayType(LHS)->getElementType();
10719 QualType RHSElem = getAsArrayType(RHS)->getElementType();
10720 if (Unqualified) {
10721 LHSElem = LHSElem.getUnqualifiedType();
10722 RHSElem = RHSElem.getUnqualifiedType();
10723 }
10724
10725 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified);
10726 if (ResultType.isNull())
10727 return {};
10728
10729 const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
10730 const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
10731
10732 // If either side is a variable array, and both are complete, check whether
10733 // the current dimension is definite.
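// Illustrative C example (added, hedged): when merging 'int[n]' with
// 'int[10]', if the VLA bound 'n' folds to an integer constant other than 10
// the dimensions are definite but unequal and the merge fails; if 'n' does
// not fold, this check is skipped and the code below falls back to one of
// the operand types.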
10734 if (LVAT || RVAT) { 10735 auto SizeFetch = [this](const VariableArrayType* VAT, 10736 const ConstantArrayType* CAT) 10737 -> std::pair<bool,llvm::APInt> { 10738 if (VAT) { 10739 std::optional<llvm::APSInt> TheInt; 10740 Expr *E = VAT->getSizeExpr(); 10741 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10742 return std::make_pair(true, *TheInt); 10743 return std::make_pair(false, llvm::APSInt()); 10744 } 10745 if (CAT) 10746 return std::make_pair(true, CAT->getSize()); 10747 return std::make_pair(false, llvm::APInt()); 10748 }; 10749 10750 bool HaveLSize, HaveRSize; 10751 llvm::APInt LSize, RSize; 10752 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10753 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10754 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10755 return {}; // Definite, but unequal, array dimension 10756 } 10757 10758 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10759 return LHS; 10760 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10761 return RHS; 10762 if (LCAT) 10763 return getConstantArrayType(ResultType, LCAT->getSize(), 10764 LCAT->getSizeExpr(), ArraySizeModifier(), 0); 10765 if (RCAT) 10766 return getConstantArrayType(ResultType, RCAT->getSize(), 10767 RCAT->getSizeExpr(), ArraySizeModifier(), 0); 10768 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10769 return LHS; 10770 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10771 return RHS; 10772 if (LVAT) { 10773 // FIXME: This isn't correct! But tricky to implement because 10774 // the array's size has to be the size of LHS, but the type 10775 // has to be different. 10776 return LHS; 10777 } 10778 if (RVAT) { 10779 // FIXME: This isn't correct! But tricky to implement because 10780 // the array's size has to be the size of RHS, but the type 10781 // has to be different. 10782 return RHS; 10783 } 10784 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10785 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10786 return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0); 10787 } 10788 case Type::FunctionNoProto: 10789 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 10790 /*AllowCXX=*/false, IsConditionalOperator); 10791 case Type::Record: 10792 case Type::Enum: 10793 return {}; 10794 case Type::Builtin: 10795 // Only exactly equal builtin types are compatible, which is tested above. 10796 return {}; 10797 case Type::Complex: 10798 // Distinct complex types are incompatible. 10799 return {}; 10800 case Type::Vector: 10801 // FIXME: The merged type should be an ExtVector! 10802 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10803 RHSCan->castAs<VectorType>())) 10804 return LHS; 10805 return {}; 10806 case Type::ConstantMatrix: 10807 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10808 RHSCan->castAs<ConstantMatrixType>())) 10809 return LHS; 10810 return {}; 10811 case Type::ObjCObject: { 10812 // Check if the types are assignment compatible. 10813 // FIXME: This should be type compatibility, e.g. whether 10814 // "LHS x; RHS x;" at global scope is legal. 
10815 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10816 RHS->castAs<ObjCObjectType>())) 10817 return LHS; 10818 return {}; 10819 } 10820 case Type::ObjCObjectPointer: 10821 if (OfBlockPointer) { 10822 if (canAssignObjCInterfacesInBlockPointer( 10823 LHS->castAs<ObjCObjectPointerType>(), 10824 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10825 return LHS; 10826 return {}; 10827 } 10828 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10829 RHS->castAs<ObjCObjectPointerType>())) 10830 return LHS; 10831 return {}; 10832 case Type::Pipe: 10833 assert(LHS != RHS && 10834 "Equivalent pipe types should have already been handled!"); 10835 return {}; 10836 case Type::BitInt: { 10837 // Merge two bit-precise int types, while trying to preserve typedef info. 10838 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10839 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10840 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10841 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10842 10843 // Like unsigned/int, shouldn't have a type if they don't match. 10844 if (LHSUnsigned != RHSUnsigned) 10845 return {}; 10846 10847 if (LHSBits != RHSBits) 10848 return {}; 10849 return LHS; 10850 } 10851 } 10852 10853 llvm_unreachable("Invalid Type::Class!"); 10854 } 10855 10856 bool ASTContext::mergeExtParameterInfo( 10857 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10858 bool &CanUseFirst, bool &CanUseSecond, 10859 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10860 assert(NewParamInfos.empty() && "param info list not empty"); 10861 CanUseFirst = CanUseSecond = true; 10862 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10863 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10864 10865 // Fast path: if the first type doesn't have ext parameter infos, 10866 // we match if and only if the second type also doesn't have them. 10867 if (!FirstHasInfo && !SecondHasInfo) 10868 return true; 10869 10870 bool NeedParamInfo = false; 10871 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10872 : SecondFnType->getExtParameterInfos().size(); 10873 10874 for (size_t I = 0; I < E; ++I) { 10875 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10876 if (FirstHasInfo) 10877 FirstParam = FirstFnType->getExtParameterInfo(I); 10878 if (SecondHasInfo) 10879 SecondParam = SecondFnType->getExtParameterInfo(I); 10880 10881 // Cannot merge unless everything except the noescape flag matches. 10882 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10883 return false; 10884 10885 bool FirstNoEscape = FirstParam.isNoEscape(); 10886 bool SecondNoEscape = SecondParam.isNoEscape(); 10887 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10888 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10889 if (NewParamInfos.back().getOpaqueValue()) 10890 NeedParamInfo = true; 10891 if (FirstNoEscape != IsNoEscape) 10892 CanUseFirst = false; 10893 if (SecondNoEscape != IsNoEscape) 10894 CanUseSecond = false; 10895 } 10896 10897 if (!NeedParamInfo) 10898 NewParamInfos.clear(); 10899 10900 return true; 10901 } 10902 10903 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10904 ObjCLayouts[CD] = nullptr; 10905 } 10906 10907 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10908 /// 'RHS' attributes and returns the merged version; including for function 10909 /// return types. 
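//
// Illustrative sketch (an assumption about typical use, not a documented
// contract): merging an unqualified Objective-C object pointer type with its
// __strong-qualified counterpart is expected to succeed and keep the __strong
// side, while a __weak vs. __strong mismatch yields a null QualType. `Ctx`
// below is a hypothetical ASTContext reference.
//
//   QualType Id = Ctx.getObjCIdType();
//   QualType StrongId = Ctx.getObjCGCQualType(Id, Qualifiers::Strong);
//   QualType Merged = Ctx.mergeObjCGCQualifiers(Id, StrongId);
//   assert(!Merged.isNull() && "expected the __strong variant to be kept");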
10910 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10911 QualType LHSCan = getCanonicalType(LHS), 10912 RHSCan = getCanonicalType(RHS); 10913 // If two types are identical, they are compatible. 10914 if (LHSCan == RHSCan) 10915 return LHS; 10916 if (RHSCan->isFunctionType()) { 10917 if (!LHSCan->isFunctionType()) 10918 return {}; 10919 QualType OldReturnType = 10920 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10921 QualType NewReturnType = 10922 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10923 QualType ResReturnType = 10924 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10925 if (ResReturnType.isNull()) 10926 return {}; 10927 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10928 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10929 // In either case, use OldReturnType to build the new function type. 10930 const auto *F = LHS->castAs<FunctionType>(); 10931 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10932 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10933 EPI.ExtInfo = getFunctionExtInfo(LHS); 10934 QualType ResultType = 10935 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10936 return ResultType; 10937 } 10938 } 10939 return {}; 10940 } 10941 10942 // If the qualifiers are different, the types can still be merged. 10943 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10944 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10945 if (LQuals != RQuals) { 10946 // If any of these qualifiers are different, we have a type mismatch. 10947 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10948 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10949 return {}; 10950 10951 // Exactly one GC qualifier difference is allowed: __strong is 10952 // okay if the other type has no GC qualifier but is an Objective 10953 // C object pointer (i.e. implicitly strong by default). We fix 10954 // this by pretending that the unqualified type was actually 10955 // qualified __strong. 
10956 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10957 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10958 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10959 10960 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10961 return {}; 10962 10963 if (GC_L == Qualifiers::Strong) 10964 return LHS; 10965 if (GC_R == Qualifiers::Strong) 10966 return RHS; 10967 return {}; 10968 } 10969 10970 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10971 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10972 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10973 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10974 if (ResQT == LHSBaseQT) 10975 return LHS; 10976 if (ResQT == RHSBaseQT) 10977 return RHS; 10978 } 10979 return {}; 10980 } 10981 10982 //===----------------------------------------------------------------------===// 10983 // Integer Predicates 10984 //===----------------------------------------------------------------------===// 10985 10986 unsigned ASTContext::getIntWidth(QualType T) const { 10987 if (const auto *ET = T->getAs<EnumType>()) 10988 T = ET->getDecl()->getIntegerType(); 10989 if (T->isBooleanType()) 10990 return 1; 10991 if (const auto *EIT = T->getAs<BitIntType>()) 10992 return EIT->getNumBits(); 10993 // For builtin types, just use the standard type sizing method 10994 return (unsigned)getTypeSize(T); 10995 } 10996 10997 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 10998 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 10999 T->isFixedPointType()) && 11000 "Unexpected type"); 11001 11002 // Turn <4 x signed int> -> <4 x unsigned int> 11003 if (const auto *VTy = T->getAs<VectorType>()) 11004 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 11005 VTy->getNumElements(), VTy->getVectorKind()); 11006 11007 // For _BitInt, return an unsigned _BitInt with same width. 11008 if (const auto *EITy = T->getAs<BitIntType>()) 11009 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 11010 11011 // For enums, get the underlying integer type of the enum, and let the general 11012 // integer type signchanging code handle it. 11013 if (const auto *ETy = T->getAs<EnumType>()) 11014 T = ETy->getDecl()->getIntegerType(); 11015 11016 switch (T->castAs<BuiltinType>()->getKind()) { 11017 case BuiltinType::Char_U: 11018 // Plain `char` is mapped to `unsigned char` even if it's already unsigned 11019 case BuiltinType::Char_S: 11020 case BuiltinType::SChar: 11021 case BuiltinType::Char8: 11022 return UnsignedCharTy; 11023 case BuiltinType::Short: 11024 return UnsignedShortTy; 11025 case BuiltinType::Int: 11026 return UnsignedIntTy; 11027 case BuiltinType::Long: 11028 return UnsignedLongTy; 11029 case BuiltinType::LongLong: 11030 return UnsignedLongLongTy; 11031 case BuiltinType::Int128: 11032 return UnsignedInt128Ty; 11033 // wchar_t is special. It is either signed or not, but when it's signed, 11034 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 11035 // version of its underlying type instead. 
11036 case BuiltinType::WChar_S: 11037 return getUnsignedWCharType(); 11038 11039 case BuiltinType::ShortAccum: 11040 return UnsignedShortAccumTy; 11041 case BuiltinType::Accum: 11042 return UnsignedAccumTy; 11043 case BuiltinType::LongAccum: 11044 return UnsignedLongAccumTy; 11045 case BuiltinType::SatShortAccum: 11046 return SatUnsignedShortAccumTy; 11047 case BuiltinType::SatAccum: 11048 return SatUnsignedAccumTy; 11049 case BuiltinType::SatLongAccum: 11050 return SatUnsignedLongAccumTy; 11051 case BuiltinType::ShortFract: 11052 return UnsignedShortFractTy; 11053 case BuiltinType::Fract: 11054 return UnsignedFractTy; 11055 case BuiltinType::LongFract: 11056 return UnsignedLongFractTy; 11057 case BuiltinType::SatShortFract: 11058 return SatUnsignedShortFractTy; 11059 case BuiltinType::SatFract: 11060 return SatUnsignedFractTy; 11061 case BuiltinType::SatLongFract: 11062 return SatUnsignedLongFractTy; 11063 default: 11064 assert((T->hasUnsignedIntegerRepresentation() || 11065 T->isUnsignedFixedPointType()) && 11066 "Unexpected signed integer or fixed point type"); 11067 return T; 11068 } 11069 } 11070 11071 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 11072 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11073 T->isFixedPointType()) && 11074 "Unexpected type"); 11075 11076 // Turn <4 x unsigned int> -> <4 x signed int> 11077 if (const auto *VTy = T->getAs<VectorType>()) 11078 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 11079 VTy->getNumElements(), VTy->getVectorKind()); 11080 11081 // For _BitInt, return a signed _BitInt with same width. 11082 if (const auto *EITy = T->getAs<BitIntType>()) 11083 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 11084 11085 // For enums, get the underlying integer type of the enum, and let the general 11086 // integer type signchanging code handle it. 11087 if (const auto *ETy = T->getAs<EnumType>()) 11088 T = ETy->getDecl()->getIntegerType(); 11089 11090 switch (T->castAs<BuiltinType>()->getKind()) { 11091 case BuiltinType::Char_S: 11092 // Plain `char` is mapped to `signed char` even if it's already signed 11093 case BuiltinType::Char_U: 11094 case BuiltinType::UChar: 11095 case BuiltinType::Char8: 11096 return SignedCharTy; 11097 case BuiltinType::UShort: 11098 return ShortTy; 11099 case BuiltinType::UInt: 11100 return IntTy; 11101 case BuiltinType::ULong: 11102 return LongTy; 11103 case BuiltinType::ULongLong: 11104 return LongLongTy; 11105 case BuiltinType::UInt128: 11106 return Int128Ty; 11107 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 11108 // there's no matching "signed wchar_t". Therefore we return the signed 11109 // version of its underlying type instead. 
11110 case BuiltinType::WChar_U: 11111 return getSignedWCharType(); 11112 11113 case BuiltinType::UShortAccum: 11114 return ShortAccumTy; 11115 case BuiltinType::UAccum: 11116 return AccumTy; 11117 case BuiltinType::ULongAccum: 11118 return LongAccumTy; 11119 case BuiltinType::SatUShortAccum: 11120 return SatShortAccumTy; 11121 case BuiltinType::SatUAccum: 11122 return SatAccumTy; 11123 case BuiltinType::SatULongAccum: 11124 return SatLongAccumTy; 11125 case BuiltinType::UShortFract: 11126 return ShortFractTy; 11127 case BuiltinType::UFract: 11128 return FractTy; 11129 case BuiltinType::ULongFract: 11130 return LongFractTy; 11131 case BuiltinType::SatUShortFract: 11132 return SatShortFractTy; 11133 case BuiltinType::SatUFract: 11134 return SatFractTy; 11135 case BuiltinType::SatULongFract: 11136 return SatLongFractTy; 11137 default: 11138 assert( 11139 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 11140 "Unexpected signed integer or fixed point type"); 11141 return T; 11142 } 11143 } 11144 11145 ASTMutationListener::~ASTMutationListener() = default; 11146 11147 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 11148 QualType ReturnType) {} 11149 11150 //===----------------------------------------------------------------------===// 11151 // Builtin Type Computation 11152 //===----------------------------------------------------------------------===// 11153 11154 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 11155 /// pointer over the consumed characters. This returns the resultant type. If 11156 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 11157 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 11158 /// a vector of "i*". 11159 /// 11160 /// RequiresICE is filled in on return to indicate whether the value is required 11161 /// to be an Integer Constant Expression. 11162 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 11163 ASTContext::GetBuiltinTypeError &Error, 11164 bool &RequiresICE, 11165 bool AllowTypeModifiers) { 11166 // Modifiers. 11167 int HowLong = 0; 11168 bool Signed = false, Unsigned = false; 11169 RequiresICE = false; 11170 11171 // Read the prefixed modifiers first. 11172 bool Done = false; 11173 #ifndef NDEBUG 11174 bool IsSpecial = false; 11175 #endif 11176 while (!Done) { 11177 switch (*Str++) { 11178 default: Done = true; --Str; break; 11179 case 'I': 11180 RequiresICE = true; 11181 break; 11182 case 'S': 11183 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 11184 assert(!Signed && "Can't use 'S' modifier multiple times!"); 11185 Signed = true; 11186 break; 11187 case 'U': 11188 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 11189 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 11190 Unsigned = true; 11191 break; 11192 case 'L': 11193 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 11194 assert(HowLong <= 2 && "Can't have LLLL modifier"); 11195 ++HowLong; 11196 break; 11197 case 'N': 11198 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 11199 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11200 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 11201 #ifndef NDEBUG 11202 IsSpecial = true; 11203 #endif 11204 if (Context.getTargetInfo().getLongWidth() == 32) 11205 ++HowLong; 11206 break; 11207 case 'W': 11208 // This modifier represents int64 type. 
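// (Illustrative: on a target whose int64 type is 'long long', the 'W' prefix
// makes a descriptor such as "Wi" decode to 'long long int'.)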
11209 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11210 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 11211 #ifndef NDEBUG 11212 IsSpecial = true; 11213 #endif 11214 switch (Context.getTargetInfo().getInt64Type()) { 11215 default: 11216 llvm_unreachable("Unexpected integer type"); 11217 case TargetInfo::SignedLong: 11218 HowLong = 1; 11219 break; 11220 case TargetInfo::SignedLongLong: 11221 HowLong = 2; 11222 break; 11223 } 11224 break; 11225 case 'Z': 11226 // This modifier represents int32 type. 11227 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11228 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 11229 #ifndef NDEBUG 11230 IsSpecial = true; 11231 #endif 11232 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 11233 default: 11234 llvm_unreachable("Unexpected integer type"); 11235 case TargetInfo::SignedInt: 11236 HowLong = 0; 11237 break; 11238 case TargetInfo::SignedLong: 11239 HowLong = 1; 11240 break; 11241 case TargetInfo::SignedLongLong: 11242 HowLong = 2; 11243 break; 11244 } 11245 break; 11246 case 'O': 11247 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11248 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 11249 #ifndef NDEBUG 11250 IsSpecial = true; 11251 #endif 11252 if (Context.getLangOpts().OpenCL) 11253 HowLong = 1; 11254 else 11255 HowLong = 2; 11256 break; 11257 } 11258 } 11259 11260 QualType Type; 11261 11262 // Read the base type. 11263 switch (*Str++) { 11264 default: llvm_unreachable("Unknown builtin type letter!"); 11265 case 'x': 11266 assert(HowLong == 0 && !Signed && !Unsigned && 11267 "Bad modifiers used with 'x'!"); 11268 Type = Context.Float16Ty; 11269 break; 11270 case 'y': 11271 assert(HowLong == 0 && !Signed && !Unsigned && 11272 "Bad modifiers used with 'y'!"); 11273 Type = Context.BFloat16Ty; 11274 break; 11275 case 'v': 11276 assert(HowLong == 0 && !Signed && !Unsigned && 11277 "Bad modifiers used with 'v'!"); 11278 Type = Context.VoidTy; 11279 break; 11280 case 'h': 11281 assert(HowLong == 0 && !Signed && !Unsigned && 11282 "Bad modifiers used with 'h'!"); 11283 Type = Context.HalfTy; 11284 break; 11285 case 'f': 11286 assert(HowLong == 0 && !Signed && !Unsigned && 11287 "Bad modifiers used with 'f'!"); 11288 Type = Context.FloatTy; 11289 break; 11290 case 'd': 11291 assert(HowLong < 3 && !Signed && !Unsigned && 11292 "Bad modifiers used with 'd'!"); 11293 if (HowLong == 1) 11294 Type = Context.LongDoubleTy; 11295 else if (HowLong == 2) 11296 Type = Context.Float128Ty; 11297 else 11298 Type = Context.DoubleTy; 11299 break; 11300 case 's': 11301 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11302 if (Unsigned) 11303 Type = Context.UnsignedShortTy; 11304 else 11305 Type = Context.ShortTy; 11306 break; 11307 case 'i': 11308 if (HowLong == 3) 11309 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11310 else if (HowLong == 2) 11311 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11312 else if (HowLong == 1) 11313 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11314 else 11315 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11316 break; 11317 case 'c': 11318 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11319 if (Signed) 11320 Type = Context.SignedCharTy; 11321 else if (Unsigned) 11322 Type = Context.UnsignedCharTy; 11323 else 11324 Type = Context.CharTy; 11325 break; 11326 case 'b': // boolean 11327 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11328 Type = Context.BoolTy; 11329 break; 11330 case 'z': // size_t. 11331 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11332 Type = Context.getSizeType(); 11333 break; 11334 case 'w': // wchar_t. 11335 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11336 Type = Context.getWideCharType(); 11337 break; 11338 case 'F': 11339 Type = Context.getCFConstantStringType(); 11340 break; 11341 case 'G': 11342 Type = Context.getObjCIdType(); 11343 break; 11344 case 'H': 11345 Type = Context.getObjCSelType(); 11346 break; 11347 case 'M': 11348 Type = Context.getObjCSuperType(); 11349 break; 11350 case 'a': 11351 Type = Context.getBuiltinVaListType(); 11352 assert(!Type.isNull() && "builtin va list type not initialized!"); 11353 break; 11354 case 'A': 11355 // This is a "reference" to a va_list; however, what exactly 11356 // this means depends on how va_list is defined. There are two 11357 // different kinds of va_list: ones passed by value, and ones 11358 // passed by reference. An example of a by-value va_list is 11359 // x86, where va_list is a char*. An example of by-ref va_list 11360 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11361 // we want this argument to be a char*&; for x86-64, we want 11362 // it to be a __va_list_tag*. 11363 Type = Context.getBuiltinVaListType(); 11364 assert(!Type.isNull() && "builtin va list type not initialized!"); 11365 if (Type->isArrayType()) 11366 Type = Context.getArrayDecayedType(Type); 11367 else 11368 Type = Context.getLValueReferenceType(Type); 11369 break; 11370 case 'q': { 11371 char *End; 11372 unsigned NumElements = strtoul(Str, &End, 10); 11373 assert(End != Str && "Missing vector size"); 11374 Str = End; 11375 11376 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11377 RequiresICE, false); 11378 assert(!RequiresICE && "Can't require vector ICE"); 11379 11380 Type = Context.getScalableVectorType(ElementType, NumElements); 11381 break; 11382 } 11383 case 'Q': { 11384 switch (*Str++) { 11385 case 'a': { 11386 Type = Context.SveCountTy; 11387 break; 11388 } 11389 default: 11390 llvm_unreachable("Unexpected target builtin type"); 11391 } 11392 break; 11393 } 11394 case 'V': { 11395 char *End; 11396 unsigned NumElements = strtoul(Str, &End, 10); 11397 assert(End != Str && "Missing vector size"); 11398 Str = End; 11399 11400 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11401 RequiresICE, false); 11402 assert(!RequiresICE && "Can't require vector ICE"); 11403 11404 // TODO: No way to make AltiVec vectors in builtins yet. 
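// (Illustrative example: a descriptor such as "V4f", i.e. four elements with
// the element type 'f' decoded above, produces a generic vector of four
// floats.)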
11405 Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic); 11406 break; 11407 } 11408 case 'E': { 11409 char *End; 11410 11411 unsigned NumElements = strtoul(Str, &End, 10); 11412 assert(End != Str && "Missing vector size"); 11413 11414 Str = End; 11415 11416 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11417 false); 11418 Type = Context.getExtVectorType(ElementType, NumElements); 11419 break; 11420 } 11421 case 'X': { 11422 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11423 false); 11424 assert(!RequiresICE && "Can't require complex ICE"); 11425 Type = Context.getComplexType(ElementType); 11426 break; 11427 } 11428 case 'Y': 11429 Type = Context.getPointerDiffType(); 11430 break; 11431 case 'P': 11432 Type = Context.getFILEType(); 11433 if (Type.isNull()) { 11434 Error = ASTContext::GE_Missing_stdio; 11435 return {}; 11436 } 11437 break; 11438 case 'J': 11439 if (Signed) 11440 Type = Context.getsigjmp_bufType(); 11441 else 11442 Type = Context.getjmp_bufType(); 11443 11444 if (Type.isNull()) { 11445 Error = ASTContext::GE_Missing_setjmp; 11446 return {}; 11447 } 11448 break; 11449 case 'K': 11450 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11451 Type = Context.getucontext_tType(); 11452 11453 if (Type.isNull()) { 11454 Error = ASTContext::GE_Missing_ucontext; 11455 return {}; 11456 } 11457 break; 11458 case 'p': 11459 Type = Context.getProcessIDType(); 11460 break; 11461 } 11462 11463 // If there are modifiers and if we're allowed to parse them, go for it. 11464 Done = !AllowTypeModifiers; 11465 while (!Done) { 11466 switch (char c = *Str++) { 11467 default: Done = true; --Str; break; 11468 case '*': 11469 case '&': { 11470 // Both pointers and references can have their pointee types 11471 // qualified with an address space. 11472 char *End; 11473 unsigned AddrSpace = strtoul(Str, &End, 10); 11474 if (End != Str) { 11475 // Note AddrSpace == 0 is not the same as an unspecified address space. 11476 Type = Context.getAddrSpaceQualType( 11477 Type, 11478 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11479 Str = End; 11480 } 11481 if (c == '*') 11482 Type = Context.getPointerType(Type); 11483 else 11484 Type = Context.getLValueReferenceType(Type); 11485 break; 11486 } 11487 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11488 case 'C': 11489 Type = Type.withConst(); 11490 break; 11491 case 'D': 11492 Type = Context.getVolatileType(Type); 11493 break; 11494 case 'R': 11495 Type = Type.withRestrict(); 11496 break; 11497 } 11498 } 11499 11500 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11501 "Integer constant 'I' type must be an integer"); 11502 11503 return Type; 11504 } 11505 11506 // On some targets such as PowerPC, some of the builtins are defined with custom 11507 // type descriptors for target-dependent types. These descriptors are decoded in 11508 // other functions, but it may be useful to be able to fall back to default 11509 // descriptor decoding to define builtins mixing target-dependent and target- 11510 // independent types. This function allows decoding one type descriptor with 11511 // default decoding. 
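//
// A minimal sketch of such a fallback decode (the name `Ctx` is a
// hypothetical ASTContext reference, and the descriptor below only
// illustrates the grammar handled by DecodeTypeFromStr above; it is not a
// claim about any particular builtin): "zcC*" consumes 'z' for a size_t
// result, then 'c' with the 'C' and '*' suffix modifiers for a
// 'const char *' parameter.
//
//   const char *Desc = "zcC*";
//   ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None;
//   bool RequiresICE = false;
//   QualType ResultTy =
//       Ctx.DecodeTypeStr(Desc, Ctx, Error, RequiresICE,
//                         /*AllowTypeModifiers=*/true);
//   QualType ParamTy =
//       Ctx.DecodeTypeStr(Desc, Ctx, Error, RequiresICE,
//                         /*AllowTypeModifiers=*/true);
//   // ResultTy is expected to be size_t and ParamTy 'const char *'.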
11512 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11513 GetBuiltinTypeError &Error, bool &RequireICE, 11514 bool AllowTypeModifiers) const { 11515 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11516 } 11517 11518 /// GetBuiltinType - Return the type for the specified builtin. 11519 QualType ASTContext::GetBuiltinType(unsigned Id, 11520 GetBuiltinTypeError &Error, 11521 unsigned *IntegerConstantArgs) const { 11522 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11523 if (TypeStr[0] == '\0') { 11524 Error = GE_Missing_type; 11525 return {}; 11526 } 11527 11528 SmallVector<QualType, 8> ArgTypes; 11529 11530 bool RequiresICE = false; 11531 Error = GE_None; 11532 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11533 RequiresICE, true); 11534 if (Error != GE_None) 11535 return {}; 11536 11537 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11538 11539 while (TypeStr[0] && TypeStr[0] != '.') { 11540 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11541 if (Error != GE_None) 11542 return {}; 11543 11544 // If this argument is required to be an IntegerConstantExpression and the 11545 // caller cares, fill in the bitmask we return. 11546 if (RequiresICE && IntegerConstantArgs) 11547 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11548 11549 // Do array -> pointer decay. The builtin should use the decayed type. 11550 if (Ty->isArrayType()) 11551 Ty = getArrayDecayedType(Ty); 11552 11553 ArgTypes.push_back(Ty); 11554 } 11555 11556 if (Id == Builtin::BI__GetExceptionInfo) 11557 return {}; 11558 11559 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11560 "'.' should only occur at end of builtin type list!"); 11561 11562 bool Variadic = (TypeStr[0] == '.'); 11563 11564 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11565 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11566 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11567 11568 11569 // We really shouldn't be making a no-proto type here. 11570 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11571 return getFunctionNoProtoType(ResType, EI); 11572 11573 FunctionProtoType::ExtProtoInfo EPI; 11574 EPI.ExtInfo = EI; 11575 EPI.Variadic = Variadic; 11576 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11577 EPI.ExceptionSpec.Type = 11578 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11579 11580 return getFunctionType(ResType, ArgTypes, EPI); 11581 } 11582 11583 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11584 const FunctionDecl *FD) { 11585 if (!FD->isExternallyVisible()) 11586 return GVA_Internal; 11587 11588 // Non-user-provided functions get emitted as weak definitions with every 11589 // use, no matter whether they've been explicitly instantiated etc. 
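// (For example, an implicitly-defined special member function, or one that is
// explicitly defaulted on its first declaration, is not user-provided.)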
11590 if (!FD->isUserProvided()) 11591 return GVA_DiscardableODR; 11592 11593 GVALinkage External; 11594 switch (FD->getTemplateSpecializationKind()) { 11595 case TSK_Undeclared: 11596 case TSK_ExplicitSpecialization: 11597 External = GVA_StrongExternal; 11598 break; 11599 11600 case TSK_ExplicitInstantiationDefinition: 11601 return GVA_StrongODR; 11602 11603 // C++11 [temp.explicit]p10: 11604 // [ Note: The intent is that an inline function that is the subject of 11605 // an explicit instantiation declaration will still be implicitly 11606 // instantiated when used so that the body can be considered for 11607 // inlining, but that no out-of-line copy of the inline function would be 11608 // generated in the translation unit. -- end note ] 11609 case TSK_ExplicitInstantiationDeclaration: 11610 return GVA_AvailableExternally; 11611 11612 case TSK_ImplicitInstantiation: 11613 External = GVA_DiscardableODR; 11614 break; 11615 } 11616 11617 if (!FD->isInlined()) 11618 return External; 11619 11620 if ((!Context.getLangOpts().CPlusPlus && 11621 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11622 !FD->hasAttr<DLLExportAttr>()) || 11623 FD->hasAttr<GNUInlineAttr>()) { 11624 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11625 11626 // GNU or C99 inline semantics. Determine whether this symbol should be 11627 // externally visible. 11628 if (FD->isInlineDefinitionExternallyVisible()) 11629 return External; 11630 11631 // C99 inline semantics, where the symbol is not externally visible. 11632 return GVA_AvailableExternally; 11633 } 11634 11635 // Functions specified with extern and inline in -fms-compatibility mode 11636 // forcibly get emitted. While the body of the function cannot be later 11637 // replaced, the function definition cannot be discarded. 11638 if (FD->isMSExternInline()) 11639 return GVA_StrongODR; 11640 11641 if (Context.getTargetInfo().getCXXABI().isMicrosoft() && 11642 isa<CXXConstructorDecl>(FD) && 11643 cast<CXXConstructorDecl>(FD)->isInheritingConstructor()) 11644 // Our approach to inheriting constructors is fundamentally different from 11645 // that used by the MS ABI, so keep our inheriting constructor thunks 11646 // internal rather than trying to pick an unambiguous mangling for them. 11647 return GVA_Internal; 11648 11649 return GVA_DiscardableODR; 11650 } 11651 11652 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11653 const Decl *D, GVALinkage L) { 11654 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11655 // dllexport/dllimport on inline functions. 11656 if (D->hasAttr<DLLImportAttr>()) { 11657 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11658 return GVA_AvailableExternally; 11659 } else if (D->hasAttr<DLLExportAttr>()) { 11660 if (L == GVA_DiscardableODR) 11661 return GVA_StrongODR; 11662 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11663 // Device-side functions with __global__ attribute must always be 11664 // visible externally so they can be launched from host. 11665 if (D->hasAttr<CUDAGlobalAttr>() && 11666 (L == GVA_DiscardableODR || L == GVA_Internal)) 11667 return GVA_StrongODR; 11668 // Single source offloading languages like CUDA/HIP need to be able to 11669 // access static device variables from host code of the same compilation 11670 // unit. 
This is done by externalizing the static variable with a shared 11671 // name between the host and device compilation which is the same for the 11672 // same compilation unit whereas different among different compilation 11673 // units. 11674 if (Context.shouldExternalize(D)) 11675 return GVA_StrongExternal; 11676 } 11677 return L; 11678 } 11679 11680 /// Adjust the GVALinkage for a declaration based on what an external AST source 11681 /// knows about whether there can be other definitions of this declaration. 11682 static GVALinkage 11683 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11684 GVALinkage L) { 11685 ExternalASTSource *Source = Ctx.getExternalSource(); 11686 if (!Source) 11687 return L; 11688 11689 switch (Source->hasExternalDefinitions(D)) { 11690 case ExternalASTSource::EK_Never: 11691 // Other translation units rely on us to provide the definition. 11692 if (L == GVA_DiscardableODR) 11693 return GVA_StrongODR; 11694 break; 11695 11696 case ExternalASTSource::EK_Always: 11697 return GVA_AvailableExternally; 11698 11699 case ExternalASTSource::EK_ReplyHazy: 11700 break; 11701 } 11702 return L; 11703 } 11704 11705 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11706 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11707 adjustGVALinkageForAttributes(*this, FD, 11708 basicGVALinkageForFunction(*this, FD))); 11709 } 11710 11711 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11712 const VarDecl *VD) { 11713 // As an extension for interactive REPLs, make sure constant variables are 11714 // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl 11715 // marking them as internal. 11716 if (Context.getLangOpts().CPlusPlus && 11717 Context.getLangOpts().IncrementalExtensions && 11718 VD->getType().isConstQualified() && 11719 !VD->getType().isVolatileQualified() && !VD->isInline() && 11720 !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate()) 11721 return GVA_DiscardableODR; 11722 11723 if (!VD->isExternallyVisible()) 11724 return GVA_Internal; 11725 11726 if (VD->isStaticLocal()) { 11727 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11728 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11729 LexicalContext = LexicalContext->getLexicalParent(); 11730 11731 // ObjC Blocks can create local variables that don't have a FunctionDecl 11732 // LexicalContext. 11733 if (!LexicalContext) 11734 return GVA_DiscardableODR; 11735 11736 // Otherwise, let the static local variable inherit its linkage from the 11737 // nearest enclosing function. 11738 auto StaticLocalLinkage = 11739 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11740 11741 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11742 // be emitted in any object with references to the symbol for the object it 11743 // contains, whether inline or out-of-line." 11744 // Similar behavior is observed with MSVC. An alternative ABI could use 11745 // StrongODR/AvailableExternally to match the function, but none are 11746 // known/supported currently. 11747 if (StaticLocalLinkage == GVA_StrongODR || 11748 StaticLocalLinkage == GVA_AvailableExternally) 11749 return GVA_DiscardableODR; 11750 return StaticLocalLinkage; 11751 } 11752 11753 // MSVC treats in-class initialized static data members as definitions. 11754 // By giving them non-strong linkage, out-of-line definitions won't 11755 // cause link errors. 
11756 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11757 return GVA_DiscardableODR; 11758 11759 // Most non-template variables have strong linkage; inline variables are 11760 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11761 GVALinkage StrongLinkage; 11762 switch (Context.getInlineVariableDefinitionKind(VD)) { 11763 case ASTContext::InlineVariableDefinitionKind::None: 11764 StrongLinkage = GVA_StrongExternal; 11765 break; 11766 case ASTContext::InlineVariableDefinitionKind::Weak: 11767 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11768 StrongLinkage = GVA_DiscardableODR; 11769 break; 11770 case ASTContext::InlineVariableDefinitionKind::Strong: 11771 StrongLinkage = GVA_StrongODR; 11772 break; 11773 } 11774 11775 switch (VD->getTemplateSpecializationKind()) { 11776 case TSK_Undeclared: 11777 return StrongLinkage; 11778 11779 case TSK_ExplicitSpecialization: 11780 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11781 VD->isStaticDataMember() 11782 ? GVA_StrongODR 11783 : StrongLinkage; 11784 11785 case TSK_ExplicitInstantiationDefinition: 11786 return GVA_StrongODR; 11787 11788 case TSK_ExplicitInstantiationDeclaration: 11789 return GVA_AvailableExternally; 11790 11791 case TSK_ImplicitInstantiation: 11792 return GVA_DiscardableODR; 11793 } 11794 11795 llvm_unreachable("Invalid Linkage!"); 11796 } 11797 11798 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const { 11799 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11800 adjustGVALinkageForAttributes(*this, VD, 11801 basicGVALinkageForVariable(*this, VD))); 11802 } 11803 11804 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11805 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11806 if (!VD->isFileVarDecl()) 11807 return false; 11808 // Global named register variables (GNU extension) are never emitted. 11809 if (VD->getStorageClass() == SC_Register) 11810 return false; 11811 if (VD->getDescribedVarTemplate() || 11812 isa<VarTemplatePartialSpecializationDecl>(VD)) 11813 return false; 11814 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11815 // We never need to emit an uninstantiated function template. 11816 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11817 return false; 11818 } else if (isa<PragmaCommentDecl>(D)) 11819 return true; 11820 else if (isa<PragmaDetectMismatchDecl>(D)) 11821 return true; 11822 else if (isa<OMPRequiresDecl>(D)) 11823 return true; 11824 else if (isa<OMPThreadPrivateDecl>(D)) 11825 return !D->getDeclContext()->isDependentContext(); 11826 else if (isa<OMPAllocateDecl>(D)) 11827 return !D->getDeclContext()->isDependentContext(); 11828 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11829 return !D->getDeclContext()->isDependentContext(); 11830 else if (isa<ImportDecl>(D)) 11831 return true; 11832 else 11833 return false; 11834 11835 // If this is a member of a class template, we do not need to emit it. 11836 if (D->getDeclContext()->isDependentContext()) 11837 return false; 11838 11839 // Weak references don't produce any output by themselves. 11840 if (D->hasAttr<WeakRefAttr>()) 11841 return false; 11842 11843 // Aliases and used decls are required. 11844 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11845 return true; 11846 11847 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11848 // Forward declarations aren't required. 
11849 if (!FD->doesThisDeclarationHaveABody()) 11850 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11851 11852 // Constructors and destructors are required. 11853 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11854 return true; 11855 11856 // The key function for a class is required. This rule only comes 11857 // into play when inline functions can be key functions, though. 11858 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11859 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11860 const CXXRecordDecl *RD = MD->getParent(); 11861 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11862 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11863 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11864 return true; 11865 } 11866 } 11867 } 11868 11869 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11870 11871 // static, static inline, always_inline, and extern inline functions can 11872 // always be deferred. Normal inline functions can be deferred in C99/C++. 11873 // Implicit template instantiations can also be deferred in C++. 11874 return !isDiscardableGVALinkage(Linkage); 11875 } 11876 11877 const auto *VD = cast<VarDecl>(D); 11878 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11879 11880 // If the decl is marked as `declare target to`, it should be emitted for the 11881 // host and for the device. 11882 if (LangOpts.OpenMP && 11883 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11884 return true; 11885 11886 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11887 !isMSStaticDataMemberInlineDefinition(VD)) 11888 return false; 11889 11890 // Variables in other module units shouldn't be forced to be emitted. 11891 if (VD->isInAnotherModuleUnit()) 11892 return false; 11893 11894 // Variables that can be needed in other TUs are required. 11895 auto Linkage = GetGVALinkageForVariable(VD); 11896 if (!isDiscardableGVALinkage(Linkage)) 11897 return true; 11898 11899 // We never need to emit a variable that is available in another TU. 11900 if (Linkage == GVA_AvailableExternally) 11901 return false; 11902 11903 // Variables that have destruction with side-effects are required. 11904 if (VD->needsDestruction(*this)) 11905 return true; 11906 11907 // Variables that have initialization with side-effects are required. 11908 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11909 // We can get a value-dependent initializer during error recovery. 11910 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11911 return true; 11912 11913 // Likewise, variables with tuple-like bindings are required if their 11914 // bindings have side-effects. 11915 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11916 for (const auto *BD : DD->bindings()) 11917 if (const auto *BindingVD = BD->getHoldingVar()) 11918 if (DeclMustBeEmitted(BindingVD)) 11919 return true; 11920 11921 return false; 11922 } 11923 11924 void ASTContext::forEachMultiversionedFunctionVersion( 11925 const FunctionDecl *FD, 11926 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11927 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11928 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11929 FD = FD->getMostRecentDecl(); 11930 // FIXME: The order of traversal here matters and depends on the order of 11931 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11932 // shouldn't rely on that. 
11933 for (auto *CurDecl : 11934 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11935 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11936 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11937 !SeenDecls.contains(CurFD)) { 11938 SeenDecls.insert(CurFD); 11939 Pred(CurFD); 11940 } 11941 } 11942 } 11943 11944 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11945 bool IsCXXMethod, 11946 bool IsBuiltin) const { 11947 // Pass through to the C++ ABI object 11948 if (IsCXXMethod) 11949 return ABI->getDefaultMethodCallConv(IsVariadic); 11950 11951 // Builtins ignore user-specified default calling convention and remain the 11952 // Target's default calling convention. 11953 if (!IsBuiltin) { 11954 switch (LangOpts.getDefaultCallingConv()) { 11955 case LangOptions::DCC_None: 11956 break; 11957 case LangOptions::DCC_CDecl: 11958 return CC_C; 11959 case LangOptions::DCC_FastCall: 11960 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11961 return CC_X86FastCall; 11962 break; 11963 case LangOptions::DCC_StdCall: 11964 if (!IsVariadic) 11965 return CC_X86StdCall; 11966 break; 11967 case LangOptions::DCC_VectorCall: 11968 // __vectorcall cannot be applied to variadic functions. 11969 if (!IsVariadic) 11970 return CC_X86VectorCall; 11971 break; 11972 case LangOptions::DCC_RegCall: 11973 // __regcall cannot be applied to variadic functions. 11974 if (!IsVariadic) 11975 return CC_X86RegCall; 11976 break; 11977 case LangOptions::DCC_RtdCall: 11978 if (!IsVariadic) 11979 return CC_M68kRTD; 11980 break; 11981 } 11982 } 11983 return Target->getDefaultCallingConv(); 11984 } 11985 11986 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 11987 // Pass through to the C++ ABI object 11988 return ABI->isNearlyEmpty(RD); 11989 } 11990 11991 VTableContextBase *ASTContext::getVTableContext() { 11992 if (!VTContext.get()) { 11993 auto ABI = Target->getCXXABI(); 11994 if (ABI.isMicrosoft()) 11995 VTContext.reset(new MicrosoftVTableContext(*this)); 11996 else { 11997 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 11998 ? 
ItaniumVTableContext::Relative 11999 : ItaniumVTableContext::Pointer; 12000 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 12001 } 12002 } 12003 return VTContext.get(); 12004 } 12005 12006 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 12007 if (!T) 12008 T = Target; 12009 switch (T->getCXXABI().getKind()) { 12010 case TargetCXXABI::AppleARM64: 12011 case TargetCXXABI::Fuchsia: 12012 case TargetCXXABI::GenericAArch64: 12013 case TargetCXXABI::GenericItanium: 12014 case TargetCXXABI::GenericARM: 12015 case TargetCXXABI::GenericMIPS: 12016 case TargetCXXABI::iOS: 12017 case TargetCXXABI::WebAssembly: 12018 case TargetCXXABI::WatchOS: 12019 case TargetCXXABI::XL: 12020 return ItaniumMangleContext::create(*this, getDiagnostics()); 12021 case TargetCXXABI::Microsoft: 12022 return MicrosoftMangleContext::create(*this, getDiagnostics()); 12023 } 12024 llvm_unreachable("Unsupported ABI"); 12025 } 12026 12027 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 12028 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 12029 "Device mangle context does not support Microsoft mangling."); 12030 switch (T.getCXXABI().getKind()) { 12031 case TargetCXXABI::AppleARM64: 12032 case TargetCXXABI::Fuchsia: 12033 case TargetCXXABI::GenericAArch64: 12034 case TargetCXXABI::GenericItanium: 12035 case TargetCXXABI::GenericARM: 12036 case TargetCXXABI::GenericMIPS: 12037 case TargetCXXABI::iOS: 12038 case TargetCXXABI::WebAssembly: 12039 case TargetCXXABI::WatchOS: 12040 case TargetCXXABI::XL: 12041 return ItaniumMangleContext::create( 12042 *this, getDiagnostics(), 12043 [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> { 12044 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 12045 return RD->getDeviceLambdaManglingNumber(); 12046 return std::nullopt; 12047 }, 12048 /*IsAux=*/true); 12049 case TargetCXXABI::Microsoft: 12050 return MicrosoftMangleContext::create(*this, getDiagnostics(), 12051 /*IsAux=*/true); 12052 } 12053 llvm_unreachable("Unsupported ABI"); 12054 } 12055 12056 CXXABI::~CXXABI() = default; 12057 12058 size_t ASTContext::getSideTableAllocatedMemory() const { 12059 return ASTRecordLayouts.getMemorySize() + 12060 llvm::capacity_in_bytes(ObjCLayouts) + 12061 llvm::capacity_in_bytes(KeyFunctions) + 12062 llvm::capacity_in_bytes(ObjCImpls) + 12063 llvm::capacity_in_bytes(BlockVarCopyInits) + 12064 llvm::capacity_in_bytes(DeclAttrs) + 12065 llvm::capacity_in_bytes(TemplateOrInstantiation) + 12066 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 12067 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 12068 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 12069 llvm::capacity_in_bytes(OverriddenMethods) + 12070 llvm::capacity_in_bytes(Types) + 12071 llvm::capacity_in_bytes(VariableArrayTypes); 12072 } 12073 12074 /// getIntTypeForBitwidth - 12075 /// sets integer QualTy according to specified details: 12076 /// bitwidth, signed/unsigned. 12077 /// Returns empty type if there is no appropriate target types. 12078 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, 12079 unsigned Signed) const { 12080 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); 12081 CanQualType QualTy = getFromTargetType(Ty); 12082 if (!QualTy && DestWidth == 128) 12083 return Signed ? Int128Ty : UnsignedInt128Ty; 12084 return QualTy; 12085 } 12086 12087 /// getRealTypeForBitwidth - 12088 /// sets floating point QualTy according to specified bitwidth. 
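/// For instance (an illustrative, target-dependent sketch; `Ctx` is a
/// hypothetical ASTContext reference): on common targets a 32-bit request is
/// satisfied by \c FloatTy and a 64-bit request by \c DoubleTy.
/// \code
///   QualType T = Ctx.getRealTypeForBitwidth(64, FloatModeKind::NoFloat);
/// \endcode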
12089 /// Returns an empty type if there is no appropriate target type.
12090 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
12091 FloatModeKind ExplicitType) const {
12092 FloatModeKind Ty =
12093 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
12094 switch (Ty) {
12095 case FloatModeKind::Half:
12096 return HalfTy;
12097 case FloatModeKind::Float:
12098 return FloatTy;
12099 case FloatModeKind::Double:
12100 return DoubleTy;
12101 case FloatModeKind::LongDouble:
12102 return LongDoubleTy;
12103 case FloatModeKind::Float128:
12104 return Float128Ty;
12105 case FloatModeKind::Ibm128:
12106 return Ibm128Ty;
12107 case FloatModeKind::NoFloat:
12108 return {};
12109 }
12110
12111 llvm_unreachable("Unhandled FloatModeKind value");
12112 }
12113
12114 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
12115 if (Number > 1)
12116 MangleNumbers[ND] = Number;
12117 }
12118
12119 unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
12120 bool ForAuxTarget) const {
12121 auto I = MangleNumbers.find(ND);
12122 unsigned Res = I != MangleNumbers.end() ? I->second : 1;
12123 // CUDA/HIP host compilation encodes host and device mangling numbers
12124 // as the lower and upper halves of a 32-bit integer.
12125 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
12126 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
12127 } else {
12128 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
12129 "number for aux target");
12130 }
12131 return Res > 1 ? Res : 1;
12132 }
12133
12134 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
12135 if (Number > 1)
12136 StaticLocalNumbers[VD] = Number;
12137 }
12138
12139 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
12140 auto I = StaticLocalNumbers.find(VD);
12141 return I != StaticLocalNumbers.end() ? I->second : 1;
12142 }
12143
12144 MangleNumberingContext &
12145 ASTContext::getManglingNumberContext(const DeclContext *DC) {
12146 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
12147 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
12148 if (!MCtx)
12149 MCtx = createMangleNumberingContext();
12150 return *MCtx;
12151 }
12152
12153 MangleNumberingContext &
12154 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
12155 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
12156 std::unique_ptr<MangleNumberingContext> &MCtx = 12157 ExtraMangleNumberingContexts[D]; 12158 if (!MCtx) 12159 MCtx = createMangleNumberingContext(); 12160 return *MCtx; 12161 } 12162 12163 std::unique_ptr<MangleNumberingContext> 12164 ASTContext::createMangleNumberingContext() const { 12165 return ABI->createMangleNumberingContext(); 12166 } 12167 12168 const CXXConstructorDecl * 12169 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 12170 return ABI->getCopyConstructorForExceptionObject( 12171 cast<CXXRecordDecl>(RD->getFirstDecl())); 12172 } 12173 12174 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 12175 CXXConstructorDecl *CD) { 12176 return ABI->addCopyConstructorForExceptionObject( 12177 cast<CXXRecordDecl>(RD->getFirstDecl()), 12178 cast<CXXConstructorDecl>(CD->getFirstDecl())); 12179 } 12180 12181 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 12182 TypedefNameDecl *DD) { 12183 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 12184 } 12185 12186 TypedefNameDecl * 12187 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 12188 return ABI->getTypedefNameForUnnamedTagDecl(TD); 12189 } 12190 12191 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 12192 DeclaratorDecl *DD) { 12193 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 12194 } 12195 12196 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 12197 return ABI->getDeclaratorForUnnamedTagDecl(TD); 12198 } 12199 12200 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 12201 ParamIndices[D] = index; 12202 } 12203 12204 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 12205 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 12206 assert(I != ParamIndices.end() && 12207 "ParmIndices lacks entry set by ParmVarDecl"); 12208 return I->second; 12209 } 12210 12211 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 12212 unsigned Length) const { 12213 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 12214 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 12215 EltTy = EltTy.withConst(); 12216 12217 EltTy = adjustStringLiteralBaseType(EltTy); 12218 12219 // Get an array type for the string, according to C99 6.4.5. This includes 12220 // the null terminator character. 
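// (For example, the five-character literal "hello" is given type 'char[6]'
// here in C, or 'const char[6]' in C++.)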
12221 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
12222 ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0);
12223 }
12224
12225 StringLiteral *
12226 ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
12227 StringLiteral *&Result = StringLiteralCache[Key];
12228 if (!Result)
12229 Result = StringLiteral::Create(
12230 *this, Key, StringLiteralKind::Ordinary,
12231 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
12232 SourceLocation());
12233 return Result;
12234 }
12235
12236 MSGuidDecl *
12237 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
12238 assert(MSGuidTagDecl && "building MS GUID without MS extensions?");
12239
12240 llvm::FoldingSetNodeID ID;
12241 MSGuidDecl::Profile(ID, Parts);
12242
12243 void *InsertPos;
12244 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
12245 return Existing;
12246
12247 QualType GUIDType = getMSGuidType().withConst();
12248 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
12249 MSGuidDecls.InsertNode(New, InsertPos);
12250 return New;
12251 }
12252
12253 UnnamedGlobalConstantDecl *
12254 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
12255 const APValue &APVal) const {
12256 llvm::FoldingSetNodeID ID;
12257 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);
12258
12259 void *InsertPos;
12260 if (UnnamedGlobalConstantDecl *Existing =
12261 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
12262 return Existing;
12263
12264 UnnamedGlobalConstantDecl *New =
12265 UnnamedGlobalConstantDecl::Create(*this, Ty, APVal);
12266 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos);
12267 return New;
12268 }
12269
12270 TemplateParamObjectDecl *
12271 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
12272 assert(T->isRecordType() && "template param object of unexpected type");
12273
12274 // C++ [temp.param]p8:
12275 // [...] a static storage duration object of type 'const T' [...]
12276 T.addConst();
12277
12278 llvm::FoldingSetNodeID ID;
12279 TemplateParamObjectDecl::Profile(ID, T, V);
12280
12281 void *InsertPos;
12282 if (TemplateParamObjectDecl *Existing =
12283 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
12284 return Existing;
12285
12286 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
12287 TemplateParamObjectDecls.InsertNode(New, InsertPos);
12288 return New;
12289 }
12290
12291 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
12292 const llvm::Triple &T = getTargetInfo().getTriple();
12293 if (!T.isOSDarwin())
12294 return false;
12295
12296 if (!(T.isiOS() && T.isOSVersionLT(7)) &&
12297 !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
12298 return false;
12299
12300 QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
12301 CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
12302 uint64_t Size = sizeChars.getQuantity();
12303 CharUnits alignChars = getTypeAlignInChars(AtomicTy);
12304 unsigned Align = alignChars.getQuantity();
12305 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
12306 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
12307 }
12308
12309 bool
12310 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
12311 const ObjCMethodDecl *MethodImpl) {
12312 // No point trying to match an unavailable/deprecated method.
12313 if (MethodDecl->hasAttr<UnavailableAttr>() 12314 || MethodDecl->hasAttr<DeprecatedAttr>()) 12315 return false; 12316 if (MethodDecl->getObjCDeclQualifier() != 12317 MethodImpl->getObjCDeclQualifier()) 12318 return false; 12319 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12320 return false; 12321 12322 if (MethodDecl->param_size() != MethodImpl->param_size()) 12323 return false; 12324 12325 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12326 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12327 EF = MethodDecl->param_end(); 12328 IM != EM && IF != EF; ++IM, ++IF) { 12329 const ParmVarDecl *DeclVar = (*IF); 12330 const ParmVarDecl *ImplVar = (*IM); 12331 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12332 return false; 12333 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12334 return false; 12335 } 12336 12337 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12338 } 12339 12340 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12341 LangAS AS; 12342 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12343 AS = LangAS::Default; 12344 else 12345 AS = QT->getPointeeType().getAddressSpace(); 12346 12347 return getTargetInfo().getNullPointerValue(AS); 12348 } 12349 12350 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12351 return getTargetInfo().getTargetAddressSpace(AS); 12352 } 12353 12354 bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { 12355 if (X == Y) 12356 return true; 12357 if (!X || !Y) 12358 return false; 12359 llvm::FoldingSetNodeID IDX, IDY; 12360 X->Profile(IDX, *this, /*Canonical=*/true); 12361 Y->Profile(IDY, *this, /*Canonical=*/true); 12362 return IDX == IDY; 12363 } 12364 12365 // The getCommon* helpers return, for given 'same' X and Y entities given as 12366 // inputs, another entity which is also the 'same' as the inputs, but which 12367 // is closer to the canonical form of the inputs, each according to a given 12368 // criteria. 12369 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of 12370 // the regular ones. 12371 12372 static Decl *getCommonDecl(Decl *X, Decl *Y) { 12373 if (!declaresSameEntity(X, Y)) 12374 return nullptr; 12375 for (const Decl *DX : X->redecls()) { 12376 // If we reach Y before reaching the first decl, that means X is older. 12377 if (DX == Y) 12378 return X; 12379 // If we reach the first decl, then Y is older. 12380 if (DX->isFirstDecl()) 12381 return Y; 12382 } 12383 llvm_unreachable("Corrupt redecls chain"); 12384 } 12385 12386 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12387 static T *getCommonDecl(T *X, T *Y) { 12388 return cast_or_null<T>( 12389 getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), 12390 const_cast<Decl *>(cast_or_null<Decl>(Y)))); 12391 } 12392 12393 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12394 static T *getCommonDeclChecked(T *X, T *Y) { 12395 return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), 12396 const_cast<Decl *>(cast<Decl>(Y)))); 12397 } 12398 12399 static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, 12400 TemplateName Y) { 12401 if (X.getAsVoidPointer() == Y.getAsVoidPointer()) 12402 return X; 12403 // FIXME: There are cases here where we could find a common template name 12404 // with more sugar. For example one could be a SubstTemplateTemplate* 12405 // replacing the other. 
12406 TemplateName CX = Ctx.getCanonicalTemplateName(X); 12407 if (CX.getAsVoidPointer() != 12408 Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) 12409 return TemplateName(); 12410 return CX; 12411 } 12412 12413 static TemplateName 12414 getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { 12415 TemplateName R = getCommonTemplateName(Ctx, X, Y); 12416 assert(R.getAsVoidPointer() != nullptr); 12417 return R; 12418 } 12419 12420 static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, 12421 ArrayRef<QualType> Ys, bool Unqualified = false) { 12422 assert(Xs.size() == Ys.size()); 12423 SmallVector<QualType, 8> Rs(Xs.size()); 12424 for (size_t I = 0; I < Rs.size(); ++I) 12425 Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); 12426 return Rs; 12427 } 12428 12429 template <class T> 12430 static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { 12431 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() 12432 : SourceLocation(); 12433 } 12434 12435 static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, 12436 const TemplateArgument &X, 12437 const TemplateArgument &Y) { 12438 if (X.getKind() != Y.getKind()) 12439 return TemplateArgument(); 12440 12441 switch (X.getKind()) { 12442 case TemplateArgument::ArgKind::Type: 12443 if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) 12444 return TemplateArgument(); 12445 return TemplateArgument( 12446 Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); 12447 case TemplateArgument::ArgKind::NullPtr: 12448 if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) 12449 return TemplateArgument(); 12450 return TemplateArgument( 12451 Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), 12452 /*Unqualified=*/true); 12453 case TemplateArgument::ArgKind::Expression: 12454 if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) 12455 return TemplateArgument(); 12456 // FIXME: Try to keep the common sugar. 12457 return X; 12458 case TemplateArgument::ArgKind::Template: { 12459 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); 12460 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12461 if (!CTN.getAsVoidPointer()) 12462 return TemplateArgument(); 12463 return TemplateArgument(CTN); 12464 } 12465 case TemplateArgument::ArgKind::TemplateExpansion: { 12466 TemplateName TX = X.getAsTemplateOrTemplatePattern(), 12467 TY = Y.getAsTemplateOrTemplatePattern(); 12468 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12469 if (!CTN.getAsVoidPointer()) 12470 return TemplateName(); 12471 auto NExpX = X.getNumTemplateExpansions(); 12472 assert(NExpX == Y.getNumTemplateExpansions()); 12473 return TemplateArgument(CTN, NExpX); 12474 } 12475 default: 12476 // FIXME: Handle the other argument kinds. 
12477 return X; 12478 } 12479 } 12480 12481 static bool getCommonTemplateArguments(ASTContext &Ctx, 12482 SmallVectorImpl<TemplateArgument> &R, 12483 ArrayRef<TemplateArgument> Xs, 12484 ArrayRef<TemplateArgument> Ys) { 12485 if (Xs.size() != Ys.size()) 12486 return true; 12487 R.resize(Xs.size()); 12488 for (size_t I = 0; I < R.size(); ++I) { 12489 R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); 12490 if (R[I].isNull()) 12491 return true; 12492 } 12493 return false; 12494 } 12495 12496 static auto getCommonTemplateArguments(ASTContext &Ctx, 12497 ArrayRef<TemplateArgument> Xs, 12498 ArrayRef<TemplateArgument> Ys) { 12499 SmallVector<TemplateArgument, 8> R; 12500 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); 12501 assert(!Different); 12502 (void)Different; 12503 return R; 12504 } 12505 12506 template <class T> 12507 static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { 12508 return X->getKeyword() == Y->getKeyword() ? X->getKeyword() 12509 : ElaboratedTypeKeyword::None; 12510 } 12511 12512 template <class T> 12513 static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, 12514 const T *Y) { 12515 // FIXME: Try to keep the common NNS sugar. 12516 return X->getQualifier() == Y->getQualifier() 12517 ? X->getQualifier() 12518 : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); 12519 } 12520 12521 template <class T> 12522 static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { 12523 return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); 12524 } 12525 12526 template <class T> 12527 static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, 12528 Qualifiers &QX, const T *Y, 12529 Qualifiers &QY) { 12530 QualType EX = X->getElementType(), EY = Y->getElementType(); 12531 QualType R = Ctx.getCommonSugaredType(EX, EY, 12532 /*Unqualified=*/true); 12533 Qualifiers RQ = R.getQualifiers(); 12534 QX += EX.getQualifiers() - RQ; 12535 QY += EY.getQualifiers() - RQ; 12536 return R; 12537 } 12538 12539 template <class T> 12540 static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { 12541 return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); 12542 } 12543 12544 template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { 12545 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); 12546 return X->getSizeExpr(); 12547 } 12548 12549 static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { 12550 assert(X->getSizeModifier() == Y->getSizeModifier()); 12551 return X->getSizeModifier(); 12552 } 12553 12554 static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, 12555 const ArrayType *Y) { 12556 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); 12557 return X->getIndexTypeCVRQualifiers(); 12558 } 12559 12560 // Merges two type lists such that the resulting vector will contain 12561 // each type (in a canonical sense) only once, in the order they appear 12562 // from X to Y. If they occur in both X and Y, the result will contain 12563 // the common sugared type between them. 
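// A rough illustration, assuming 'Int32' is a typedef of 'int':
//   X = {Int32, float}, Y = {int, double}
//   => Out = {int, float, double}
// 'Int32' and 'int' share a canonical type, so they collapse to their common
// sugared form ('int' here); the remaining entries keep their X-then-Y order.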
12564 static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, 12565 ArrayRef<QualType> X, ArrayRef<QualType> Y) { 12566 llvm::DenseMap<QualType, unsigned> Found; 12567 for (auto Ts : {X, Y}) { 12568 for (QualType T : Ts) { 12569 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); 12570 if (!Res.second) { 12571 QualType &U = Out[Res.first->second]; 12572 U = Ctx.getCommonSugaredType(U, T); 12573 } else { 12574 Out.emplace_back(T); 12575 } 12576 } 12577 } 12578 } 12579 12580 FunctionProtoType::ExceptionSpecInfo 12581 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, 12582 FunctionProtoType::ExceptionSpecInfo ESI2, 12583 SmallVectorImpl<QualType> &ExceptionTypeStorage, 12584 bool AcceptDependent) { 12585 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; 12586 12587 // If either of them can throw anything, that is the result. 12588 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { 12589 if (EST1 == I) 12590 return ESI1; 12591 if (EST2 == I) 12592 return ESI2; 12593 } 12594 12595 // If either of them is non-throwing, the result is the other. 12596 for (auto I : 12597 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { 12598 if (EST1 == I) 12599 return ESI2; 12600 if (EST2 == I) 12601 return ESI1; 12602 } 12603 12604 // If we're left with value-dependent computed noexcept expressions, we're 12605 // stuck. Before C++17, we can just drop the exception specification entirely, 12606 // since it's not actually part of the canonical type. And this should never 12607 // happen in C++17, because it would mean we were computing the composite 12608 // pointer type of dependent types, which should never happen. 12609 if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) { 12610 assert(AcceptDependent && 12611 "computing composite pointer type of dependent types"); 12612 return FunctionProtoType::ExceptionSpecInfo(); 12613 } 12614 12615 // Switch over the possibilities so that people adding new values know to 12616 // update this function. 12617 switch (EST1) { 12618 case EST_None: 12619 case EST_DynamicNone: 12620 case EST_MSAny: 12621 case EST_BasicNoexcept: 12622 case EST_DependentNoexcept: 12623 case EST_NoexceptFalse: 12624 case EST_NoexceptTrue: 12625 case EST_NoThrow: 12626 llvm_unreachable("These ESTs should be handled above"); 12627 12628 case EST_Dynamic: { 12629 // This is the fun case: both exception specifications are dynamic. Form 12630 // the union of the two lists. 
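// A rough illustration ('Foo' stands for an arbitrary class type):
//   throw(int, Foo) merged with throw(Foo, double)
//   => throw(int, Foo, double)
// The lists are concatenated in order and duplicates are collapsed by
// mergeTypeLists.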
12631 assert(EST2 == EST_Dynamic && "other cases should already be handled"); 12632 mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions, 12633 ESI2.Exceptions); 12634 FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic); 12635 Result.Exceptions = ExceptionTypeStorage; 12636 return Result; 12637 } 12638 12639 case EST_Unevaluated: 12640 case EST_Uninstantiated: 12641 case EST_Unparsed: 12642 llvm_unreachable("shouldn't see unresolved exception specifications here"); 12643 } 12644 12645 llvm_unreachable("invalid ExceptionSpecificationType"); 12646 } 12647 12648 static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, 12649 Qualifiers &QX, const Type *Y, 12650 Qualifiers &QY) { 12651 Type::TypeClass TC = X->getTypeClass(); 12652 assert(TC == Y->getTypeClass()); 12653 switch (TC) { 12654 #define UNEXPECTED_TYPE(Class, Kind) \ 12655 case Type::Class: \ 12656 llvm_unreachable("Unexpected " Kind ": " #Class); 12657 12658 #define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical") 12659 #define TYPE(Class, Base) 12660 #include "clang/AST/TypeNodes.inc" 12661 12662 #define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free") 12663 SUGAR_FREE_TYPE(Builtin) 12664 SUGAR_FREE_TYPE(DeducedTemplateSpecialization) 12665 SUGAR_FREE_TYPE(DependentBitInt) 12666 SUGAR_FREE_TYPE(Enum) 12667 SUGAR_FREE_TYPE(BitInt) 12668 SUGAR_FREE_TYPE(ObjCInterface) 12669 SUGAR_FREE_TYPE(Record) 12670 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack) 12671 SUGAR_FREE_TYPE(UnresolvedUsing) 12672 #undef SUGAR_FREE_TYPE 12673 #define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique") 12674 NON_UNIQUE_TYPE(TypeOfExpr) 12675 NON_UNIQUE_TYPE(VariableArray) 12676 #undef NON_UNIQUE_TYPE 12677 12678 UNEXPECTED_TYPE(TypeOf, "sugar") 12679 12680 #undef UNEXPECTED_TYPE 12681 12682 case Type::Auto: { 12683 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 12684 assert(AX->getDeducedType().isNull()); 12685 assert(AY->getDeducedType().isNull()); 12686 assert(AX->getKeyword() == AY->getKeyword()); 12687 assert(AX->isInstantiationDependentType() == 12688 AY->isInstantiationDependentType()); 12689 auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(), 12690 AY->getTypeConstraintArguments()); 12691 return Ctx.getAutoType(QualType(), AX->getKeyword(), 12692 AX->isInstantiationDependentType(), 12693 AX->containsUnexpandedParameterPack(), 12694 getCommonDeclChecked(AX->getTypeConstraintConcept(), 12695 AY->getTypeConstraintConcept()), 12696 As); 12697 } 12698 case Type::IncompleteArray: { 12699 const auto *AX = cast<IncompleteArrayType>(X), 12700 *AY = cast<IncompleteArrayType>(Y); 12701 return Ctx.getIncompleteArrayType( 12702 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12703 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12704 } 12705 case Type::DependentSizedArray: { 12706 const auto *AX = cast<DependentSizedArrayType>(X), 12707 *AY = cast<DependentSizedArrayType>(Y); 12708 return Ctx.getDependentSizedArrayType( 12709 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12710 getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY), 12711 getCommonIndexTypeCVRQualifiers(AX, AY), 12712 AX->getBracketsRange() == AY->getBracketsRange() 12713 ? 
AX->getBracketsRange() 12714 : SourceRange()); 12715 } 12716 case Type::ConstantArray: { 12717 const auto *AX = cast<ConstantArrayType>(X), 12718 *AY = cast<ConstantArrayType>(Y); 12719 assert(AX->getSize() == AY->getSize()); 12720 const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) 12721 ? AX->getSizeExpr() 12722 : nullptr; 12723 return Ctx.getConstantArrayType( 12724 getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, 12725 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12726 } 12727 case Type::Atomic: { 12728 const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); 12729 return Ctx.getAtomicType( 12730 Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType())); 12731 } 12732 case Type::Complex: { 12733 const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y); 12734 return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY)); 12735 } 12736 case Type::Pointer: { 12737 const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y); 12738 return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY)); 12739 } 12740 case Type::BlockPointer: { 12741 const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y); 12742 return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY)); 12743 } 12744 case Type::ObjCObjectPointer: { 12745 const auto *PX = cast<ObjCObjectPointerType>(X), 12746 *PY = cast<ObjCObjectPointerType>(Y); 12747 return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY)); 12748 } 12749 case Type::MemberPointer: { 12750 const auto *PX = cast<MemberPointerType>(X), 12751 *PY = cast<MemberPointerType>(Y); 12752 return Ctx.getMemberPointerType( 12753 getCommonPointeeType(Ctx, PX, PY), 12754 Ctx.getCommonSugaredType(QualType(PX->getClass(), 0), 12755 QualType(PY->getClass(), 0)) 12756 .getTypePtr()); 12757 } 12758 case Type::LValueReference: { 12759 const auto *PX = cast<LValueReferenceType>(X), 12760 *PY = cast<LValueReferenceType>(Y); 12761 // FIXME: Preserve PointeeTypeAsWritten. 12762 return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY), 12763 PX->isSpelledAsLValue() || 12764 PY->isSpelledAsLValue()); 12765 } 12766 case Type::RValueReference: { 12767 const auto *PX = cast<RValueReferenceType>(X), 12768 *PY = cast<RValueReferenceType>(Y); 12769 // FIXME: Preserve PointeeTypeAsWritten. 
12770 return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY)); 12771 } 12772 case Type::DependentAddressSpace: { 12773 const auto *PX = cast<DependentAddressSpaceType>(X), 12774 *PY = cast<DependentAddressSpaceType>(Y); 12775 assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr())); 12776 return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY), 12777 PX->getAddrSpaceExpr(), 12778 getCommonAttrLoc(PX, PY)); 12779 } 12780 case Type::FunctionNoProto: { 12781 const auto *FX = cast<FunctionNoProtoType>(X), 12782 *FY = cast<FunctionNoProtoType>(Y); 12783 assert(FX->getExtInfo() == FY->getExtInfo()); 12784 return Ctx.getFunctionNoProtoType( 12785 Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()), 12786 FX->getExtInfo()); 12787 } 12788 case Type::FunctionProto: { 12789 const auto *FX = cast<FunctionProtoType>(X), 12790 *FY = cast<FunctionProtoType>(Y); 12791 FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(), 12792 EPIY = FY->getExtProtoInfo(); 12793 assert(EPIX.ExtInfo == EPIY.ExtInfo); 12794 assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos); 12795 assert(EPIX.RefQualifier == EPIY.RefQualifier); 12796 assert(EPIX.TypeQuals == EPIY.TypeQuals); 12797 assert(EPIX.Variadic == EPIY.Variadic); 12798 12799 // FIXME: Can we handle an empty EllipsisLoc? 12800 // Use an empty EllipsisLoc if X and Y differ. 12801 12802 EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn; 12803 12804 QualType R = 12805 Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()); 12806 auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(), 12807 /*Unqualified=*/true); 12808 12809 SmallVector<QualType, 8> Exceptions; 12810 EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs( 12811 EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, true); 12812 return Ctx.getFunctionType(R, P, EPIX); 12813 } 12814 case Type::ObjCObject: { 12815 const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y); 12816 assert( 12817 std::equal(OX->getProtocols().begin(), OX->getProtocols().end(), 12818 OY->getProtocols().begin(), OY->getProtocols().end(), 12819 [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) { 12820 return P0->getCanonicalDecl() == P1->getCanonicalDecl(); 12821 }) && 12822 "protocol lists must be the same"); 12823 auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(), 12824 OY->getTypeArgsAsWritten()); 12825 return Ctx.getObjCObjectType( 12826 Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs, 12827 OX->getProtocols(), 12828 OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten()); 12829 } 12830 case Type::ConstantMatrix: { 12831 const auto *MX = cast<ConstantMatrixType>(X), 12832 *MY = cast<ConstantMatrixType>(Y); 12833 assert(MX->getNumRows() == MY->getNumRows()); 12834 assert(MX->getNumColumns() == MY->getNumColumns()); 12835 return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY), 12836 MX->getNumRows(), MX->getNumColumns()); 12837 } 12838 case Type::DependentSizedMatrix: { 12839 const auto *MX = cast<DependentSizedMatrixType>(X), 12840 *MY = cast<DependentSizedMatrixType>(Y); 12841 assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr())); 12842 assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr())); 12843 return Ctx.getDependentSizedMatrixType( 12844 getCommonElementType(Ctx, MX, MY), MX->getRowExpr(), 12845 MX->getColumnExpr(), getCommonAttrLoc(MX, MY)); 12846 } 12847 case Type::Vector: { 12848 const auto *VX = cast<VectorType>(X), *VY =
cast<VectorType>(Y); 12849 assert(VX->getNumElements() == VY->getNumElements()); 12850 assert(VX->getVectorKind() == VY->getVectorKind()); 12851 return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY), 12852 VX->getNumElements(), VX->getVectorKind()); 12853 } 12854 case Type::ExtVector: { 12855 const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y); 12856 assert(VX->getNumElements() == VY->getNumElements()); 12857 return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY), 12858 VX->getNumElements()); 12859 } 12860 case Type::DependentSizedExtVector: { 12861 const auto *VX = cast<DependentSizedExtVectorType>(X), 12862 *VY = cast<DependentSizedExtVectorType>(Y); 12863 return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY), 12864 getCommonSizeExpr(Ctx, VX, VY), 12865 getCommonAttrLoc(VX, VY)); 12866 } 12867 case Type::DependentVector: { 12868 const auto *VX = cast<DependentVectorType>(X), 12869 *VY = cast<DependentVectorType>(Y); 12870 assert(VX->getVectorKind() == VY->getVectorKind()); 12871 return Ctx.getDependentVectorType( 12872 getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY), 12873 getCommonAttrLoc(VX, VY), VX->getVectorKind()); 12874 } 12875 case Type::InjectedClassName: { 12876 const auto *IX = cast<InjectedClassNameType>(X), 12877 *IY = cast<InjectedClassNameType>(Y); 12878 return Ctx.getInjectedClassNameType( 12879 getCommonDeclChecked(IX->getDecl(), IY->getDecl()), 12880 Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(), 12881 IY->getInjectedSpecializationType())); 12882 } 12883 case Type::TemplateSpecialization: { 12884 const auto *TX = cast<TemplateSpecializationType>(X), 12885 *TY = cast<TemplateSpecializationType>(Y); 12886 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12887 TY->template_arguments()); 12888 return Ctx.getTemplateSpecializationType( 12889 ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(), 12890 TY->getTemplateName()), 12891 As, X->getCanonicalTypeInternal()); 12892 } 12893 case Type::Decltype: { 12894 const auto *DX = cast<DecltypeType>(X); 12895 [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y); 12896 assert(DX->isDependentType()); 12897 assert(DY->isDependentType()); 12898 assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr())); 12899 // As Decltype is not uniqued, building a common type would be wasteful. 
12900 return QualType(DX, 0); 12901 } 12902 case Type::DependentName: { 12903 const auto *NX = cast<DependentNameType>(X), 12904 *NY = cast<DependentNameType>(Y); 12905 assert(NX->getIdentifier() == NY->getIdentifier()); 12906 return Ctx.getDependentNameType( 12907 getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY), 12908 NX->getIdentifier(), NX->getCanonicalTypeInternal()); 12909 } 12910 case Type::DependentTemplateSpecialization: { 12911 const auto *TX = cast<DependentTemplateSpecializationType>(X), 12912 *TY = cast<DependentTemplateSpecializationType>(Y); 12913 assert(TX->getIdentifier() == TY->getIdentifier()); 12914 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12915 TY->template_arguments()); 12916 return Ctx.getDependentTemplateSpecializationType( 12917 getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY), 12918 TX->getIdentifier(), As); 12919 } 12920 case Type::UnaryTransform: { 12921 const auto *TX = cast<UnaryTransformType>(X), 12922 *TY = cast<UnaryTransformType>(Y); 12923 assert(TX->getUTTKind() == TY->getUTTKind()); 12924 return Ctx.getUnaryTransformType( 12925 Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()), 12926 Ctx.getCommonSugaredType(TX->getUnderlyingType(), 12927 TY->getUnderlyingType()), 12928 TX->getUTTKind()); 12929 } 12930 case Type::PackExpansion: { 12931 const auto *PX = cast<PackExpansionType>(X), 12932 *PY = cast<PackExpansionType>(Y); 12933 assert(PX->getNumExpansions() == PY->getNumExpansions()); 12934 return Ctx.getPackExpansionType( 12935 Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()), 12936 PX->getNumExpansions(), false); 12937 } 12938 case Type::Pipe: { 12939 const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y); 12940 assert(PX->isReadOnly() == PY->isReadOnly()); 12941 auto MP = PX->isReadOnly() ? 
&ASTContext::getReadPipeType 12942 : &ASTContext::getWritePipeType; 12943 return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY)); 12944 } 12945 case Type::TemplateTypeParm: { 12946 const auto *TX = cast<TemplateTypeParmType>(X), 12947 *TY = cast<TemplateTypeParmType>(Y); 12948 assert(TX->getDepth() == TY->getDepth()); 12949 assert(TX->getIndex() == TY->getIndex()); 12950 assert(TX->isParameterPack() == TY->isParameterPack()); 12951 return Ctx.getTemplateTypeParmType( 12952 TX->getDepth(), TX->getIndex(), TX->isParameterPack(), 12953 getCommonDecl(TX->getDecl(), TY->getDecl())); 12954 } 12955 } 12956 llvm_unreachable("Unknown Type Class"); 12957 } 12958 12959 static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, 12960 const Type *Y, 12961 SplitQualType Underlying) { 12962 Type::TypeClass TC = X->getTypeClass(); 12963 if (TC != Y->getTypeClass()) 12964 return QualType(); 12965 switch (TC) { 12966 #define UNEXPECTED_TYPE(Class, Kind) \ 12967 case Type::Class: \ 12968 llvm_unreachable("Unexpected " Kind ": " #Class); 12969 #define TYPE(Class, Base) 12970 #define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent") 12971 #include "clang/AST/TypeNodes.inc" 12972 12973 #define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical") 12974 CANONICAL_TYPE(Atomic) 12975 CANONICAL_TYPE(BitInt) 12976 CANONICAL_TYPE(BlockPointer) 12977 CANONICAL_TYPE(Builtin) 12978 CANONICAL_TYPE(Complex) 12979 CANONICAL_TYPE(ConstantArray) 12980 CANONICAL_TYPE(ConstantMatrix) 12981 CANONICAL_TYPE(Enum) 12982 CANONICAL_TYPE(ExtVector) 12983 CANONICAL_TYPE(FunctionNoProto) 12984 CANONICAL_TYPE(FunctionProto) 12985 CANONICAL_TYPE(IncompleteArray) 12986 CANONICAL_TYPE(LValueReference) 12987 CANONICAL_TYPE(MemberPointer) 12988 CANONICAL_TYPE(ObjCInterface) 12989 CANONICAL_TYPE(ObjCObject) 12990 CANONICAL_TYPE(ObjCObjectPointer) 12991 CANONICAL_TYPE(Pipe) 12992 CANONICAL_TYPE(Pointer) 12993 CANONICAL_TYPE(Record) 12994 CANONICAL_TYPE(RValueReference) 12995 CANONICAL_TYPE(VariableArray) 12996 CANONICAL_TYPE(Vector) 12997 #undef CANONICAL_TYPE 12998 12999 #undef UNEXPECTED_TYPE 13000 13001 case Type::Adjusted: { 13002 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); 13003 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); 13004 if (!Ctx.hasSameType(OX, OY)) 13005 return QualType(); 13006 // FIXME: It's inefficient to have to unify the original types. 13007 return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), 13008 Ctx.getQualifiedType(Underlying)); 13009 } 13010 case Type::Decayed: { 13011 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); 13012 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); 13013 if (!Ctx.hasSameType(OX, OY)) 13014 return QualType(); 13015 // FIXME: It's inefficient to have to unify the original types. 13016 return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY), 13017 Ctx.getQualifiedType(Underlying)); 13018 } 13019 case Type::Attributed: { 13020 const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y); 13021 AttributedType::Kind Kind = AX->getAttrKind(); 13022 if (Kind != AY->getAttrKind()) 13023 return QualType(); 13024 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType(); 13025 if (!Ctx.hasSameType(MX, MY)) 13026 return QualType(); 13027 // FIXME: It's inefficient to have to unify the modified types. 
13028 return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY), 13029 Ctx.getQualifiedType(Underlying)); 13030 } 13031 case Type::BTFTagAttributed: { 13032 const auto *BX = cast<BTFTagAttributedType>(X); 13033 const BTFTypeTagAttr *AX = BX->getAttr(); 13034 // The attribute is not uniqued, so just compare the tag. 13035 if (AX->getBTFTypeTag() != 13036 cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag()) 13037 return QualType(); 13038 return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying)); 13039 } 13040 case Type::Auto: { 13041 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 13042 13043 AutoTypeKeyword KW = AX->getKeyword(); 13044 if (KW != AY->getKeyword()) 13045 return QualType(); 13046 13047 ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(), 13048 AY->getTypeConstraintConcept()); 13049 SmallVector<TemplateArgument, 8> As; 13050 if (CD && 13051 getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(), 13052 AY->getTypeConstraintArguments())) { 13053 CD = nullptr; // The arguments differ, so make it unconstrained. 13054 As.clear(); 13055 } 13056 13057 // Both auto types can't be dependent, otherwise they wouldn't have been 13058 // sugar. This implies they can't contain unexpanded packs either. 13059 return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), 13060 /*IsDependent=*/false, /*IsPack=*/false, CD, As); 13061 } 13062 case Type::Decltype: 13063 return QualType(); 13064 case Type::DeducedTemplateSpecialization: 13065 // FIXME: Try to merge these. 13066 return QualType(); 13067 13068 case Type::Elaborated: { 13069 const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y); 13070 return Ctx.getElaboratedType( 13071 ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY), 13072 Ctx.getQualifiedType(Underlying), 13073 ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl())); 13074 } 13075 case Type::MacroQualified: { 13076 const auto *MX = cast<MacroQualifiedType>(X), 13077 *MY = cast<MacroQualifiedType>(Y); 13078 const IdentifierInfo *IX = MX->getMacroIdentifier(); 13079 if (IX != MY->getMacroIdentifier()) 13080 return QualType(); 13081 return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX); 13082 } 13083 case Type::SubstTemplateTypeParm: { 13084 const auto *SX = cast<SubstTemplateTypeParmType>(X), 13085 *SY = cast<SubstTemplateTypeParmType>(Y); 13086 Decl *CD = 13087 ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl()); 13088 if (!CD) 13089 return QualType(); 13090 unsigned Index = SX->getIndex(); 13091 if (Index != SY->getIndex()) 13092 return QualType(); 13093 auto PackIndex = SX->getPackIndex(); 13094 if (PackIndex != SY->getPackIndex()) 13095 return QualType(); 13096 return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying), 13097 CD, Index, PackIndex); 13098 } 13099 case Type::ObjCTypeParam: 13100 // FIXME: Try to merge these. 
13101 return QualType(); 13102 case Type::Paren: 13103 return Ctx.getParenType(Ctx.getQualifiedType(Underlying)); 13104 13105 case Type::TemplateSpecialization: { 13106 const auto *TX = cast<TemplateSpecializationType>(X), 13107 *TY = cast<TemplateSpecializationType>(Y); 13108 TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(), 13109 TY->getTemplateName()); 13110 if (!CTN.getAsVoidPointer()) 13111 return QualType(); 13112 SmallVector<TemplateArgument, 8> Args; 13113 if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(), 13114 TY->template_arguments())) 13115 return QualType(); 13116 return Ctx.getTemplateSpecializationType(CTN, Args, 13117 Ctx.getQualifiedType(Underlying)); 13118 } 13119 case Type::Typedef: { 13120 const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y); 13121 const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl()); 13122 if (!CD) 13123 return QualType(); 13124 return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying)); 13125 } 13126 case Type::TypeOf: { 13127 // The common sugar between two typeof expressions, where one is 13128 // potentially a typeof_unqual and the other is not, we unify to the 13129 // qualified type as that retains the most information along with the type. 13130 // We only return a typeof_unqual type when both types are unqual types. 13131 TypeOfKind Kind = TypeOfKind::Qualified; 13132 if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() && 13133 cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified) 13134 Kind = TypeOfKind::Unqualified; 13135 return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind); 13136 } 13137 case Type::TypeOfExpr: 13138 return QualType(); 13139 13140 case Type::UnaryTransform: { 13141 const auto *UX = cast<UnaryTransformType>(X), 13142 *UY = cast<UnaryTransformType>(Y); 13143 UnaryTransformType::UTTKind KX = UX->getUTTKind(); 13144 if (KX != UY->getUTTKind()) 13145 return QualType(); 13146 QualType BX = UX->getBaseType(), BY = UY->getBaseType(); 13147 if (!Ctx.hasSameType(BX, BY)) 13148 return QualType(); 13149 // FIXME: It's inefficient to have to unify the base types. 13150 return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY), 13151 Ctx.getQualifiedType(Underlying), KX); 13152 } 13153 case Type::Using: { 13154 const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y); 13155 const UsingShadowDecl *CD = 13156 ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl()); 13157 if (!CD) 13158 return QualType(); 13159 return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying)); 13160 } 13161 } 13162 llvm_unreachable("Unhandled Type Class"); 13163 } 13164 13165 static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) { 13166 SmallVector<SplitQualType, 8> R; 13167 while (true) { 13168 QTotal.addConsistentQualifiers(T.Quals); 13169 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType(); 13170 if (NT == QualType(T.Ty, 0)) 13171 break; 13172 R.push_back(T); 13173 T = NT.split(); 13174 } 13175 return R; 13176 } 13177 13178 QualType ASTContext::getCommonSugaredType(QualType X, QualType Y, 13179 bool Unqualified) { 13180 assert(Unqualified ? 
hasSameUnqualifiedType(X, Y) : hasSameType(X, Y)); 13181 if (X == Y) 13182 return X; 13183 if (!Unqualified) { 13184 if (X.isCanonical()) 13185 return X; 13186 if (Y.isCanonical()) 13187 return Y; 13188 } 13189 13190 SplitQualType SX = X.split(), SY = Y.split(); 13191 Qualifiers QX, QY; 13192 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys, 13193 // until we reach their underlying "canonical nodes". Note these are not 13194 // necessarily canonical types, as they may still have sugared properties. 13195 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively. 13196 auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY); 13197 if (SX.Ty != SY.Ty) { 13198 // The canonical nodes differ. Build a common canonical node out of the two, 13199 // unifying their sugar. This may recurse back here. 13200 SX.Ty = 13201 ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr(); 13202 } else { 13203 // The canonical nodes were identical: We may have desugared too much. 13204 // Add any common sugar back in. 13205 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) { 13206 QX -= SX.Quals; 13207 QY -= SY.Quals; 13208 SX = Xs.pop_back_val(); 13209 SY = Ys.pop_back_val(); 13210 } 13211 } 13212 if (Unqualified) 13213 QX = Qualifiers::removeCommonQualifiers(QX, QY); 13214 else 13215 assert(QX == QY); 13216 13217 // Even though the remaining sugar nodes in Xs and Ys differ, some may be 13218 // related. Walk up these nodes, unifying them and adding the result. 13219 while (!Xs.empty() && !Ys.empty()) { 13220 auto Underlying = SplitQualType( 13221 SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals)); 13222 SX = Xs.pop_back_val(); 13223 SY = Ys.pop_back_val(); 13224 SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying) 13225 .getTypePtrOrNull(); 13226 // Stop at the first pair which is unrelated. 13227 if (!SX.Ty) { 13228 SX.Ty = Underlying.Ty; 13229 break; 13230 } 13231 QX -= Underlying.Quals; 13232 }; 13233 13234 // Add back the missing accumulated qualifiers, which were stripped off 13235 // with the sugar nodes we could not unify. 13236 QualType R = getQualifiedType(SX.Ty, QX); 13237 assert(Unqualified ? 
hasSameUnqualifiedType(R, X) : hasSameType(R, X)); 13238 return R; 13239 } 13240 13241 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 13242 assert(Ty->isFixedPointType()); 13243 13244 if (Ty->isSaturatedFixedPointType()) return Ty; 13245 13246 switch (Ty->castAs<BuiltinType>()->getKind()) { 13247 default: 13248 llvm_unreachable("Not a fixed point type!"); 13249 case BuiltinType::ShortAccum: 13250 return SatShortAccumTy; 13251 case BuiltinType::Accum: 13252 return SatAccumTy; 13253 case BuiltinType::LongAccum: 13254 return SatLongAccumTy; 13255 case BuiltinType::UShortAccum: 13256 return SatUnsignedShortAccumTy; 13257 case BuiltinType::UAccum: 13258 return SatUnsignedAccumTy; 13259 case BuiltinType::ULongAccum: 13260 return SatUnsignedLongAccumTy; 13261 case BuiltinType::ShortFract: 13262 return SatShortFractTy; 13263 case BuiltinType::Fract: 13264 return SatFractTy; 13265 case BuiltinType::LongFract: 13266 return SatLongFractTy; 13267 case BuiltinType::UShortFract: 13268 return SatUnsignedShortFractTy; 13269 case BuiltinType::UFract: 13270 return SatUnsignedFractTy; 13271 case BuiltinType::ULongFract: 13272 return SatUnsignedLongFractTy; 13273 } 13274 } 13275 13276 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 13277 if (LangOpts.OpenCL) 13278 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 13279 13280 if (LangOpts.CUDA) 13281 return getTargetInfo().getCUDABuiltinAddressSpace(AS); 13282 13283 return getLangASFromTargetAS(AS); 13284 } 13285 13286 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 13287 // doesn't include ASTContext.h 13288 template 13289 clang::LazyGenerationalUpdatePtr< 13290 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 13291 clang::LazyGenerationalUpdatePtr< 13292 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 13293 const clang::ASTContext &Ctx, Decl *Value); 13294 13295 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 13296 assert(Ty->isFixedPointType()); 13297 13298 const TargetInfo &Target = getTargetInfo(); 13299 switch (Ty->castAs<BuiltinType>()->getKind()) { 13300 default: 13301 llvm_unreachable("Not a fixed point type!"); 13302 case BuiltinType::ShortAccum: 13303 case BuiltinType::SatShortAccum: 13304 return Target.getShortAccumScale(); 13305 case BuiltinType::Accum: 13306 case BuiltinType::SatAccum: 13307 return Target.getAccumScale(); 13308 case BuiltinType::LongAccum: 13309 case BuiltinType::SatLongAccum: 13310 return Target.getLongAccumScale(); 13311 case BuiltinType::UShortAccum: 13312 case BuiltinType::SatUShortAccum: 13313 return Target.getUnsignedShortAccumScale(); 13314 case BuiltinType::UAccum: 13315 case BuiltinType::SatUAccum: 13316 return Target.getUnsignedAccumScale(); 13317 case BuiltinType::ULongAccum: 13318 case BuiltinType::SatULongAccum: 13319 return Target.getUnsignedLongAccumScale(); 13320 case BuiltinType::ShortFract: 13321 case BuiltinType::SatShortFract: 13322 return Target.getShortFractScale(); 13323 case BuiltinType::Fract: 13324 case BuiltinType::SatFract: 13325 return Target.getFractScale(); 13326 case BuiltinType::LongFract: 13327 case BuiltinType::SatLongFract: 13328 return Target.getLongFractScale(); 13329 case BuiltinType::UShortFract: 13330 case BuiltinType::SatUShortFract: 13331 return Target.getUnsignedShortFractScale(); 13332 case BuiltinType::UFract: 13333 case BuiltinType::SatUFract: 13334 return Target.getUnsignedFractScale(); 13335 case BuiltinType::ULongFract: 13336 
case BuiltinType::SatULongFract: 13337 return Target.getUnsignedLongFractScale(); 13338 } 13339 } 13340 13341 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 13342 assert(Ty->isFixedPointType()); 13343 13344 const TargetInfo &Target = getTargetInfo(); 13345 switch (Ty->castAs<BuiltinType>()->getKind()) { 13346 default: 13347 llvm_unreachable("Not a fixed point type!"); 13348 case BuiltinType::ShortAccum: 13349 case BuiltinType::SatShortAccum: 13350 return Target.getShortAccumIBits(); 13351 case BuiltinType::Accum: 13352 case BuiltinType::SatAccum: 13353 return Target.getAccumIBits(); 13354 case BuiltinType::LongAccum: 13355 case BuiltinType::SatLongAccum: 13356 return Target.getLongAccumIBits(); 13357 case BuiltinType::UShortAccum: 13358 case BuiltinType::SatUShortAccum: 13359 return Target.getUnsignedShortAccumIBits(); 13360 case BuiltinType::UAccum: 13361 case BuiltinType::SatUAccum: 13362 return Target.getUnsignedAccumIBits(); 13363 case BuiltinType::ULongAccum: 13364 case BuiltinType::SatULongAccum: 13365 return Target.getUnsignedLongAccumIBits(); 13366 case BuiltinType::ShortFract: 13367 case BuiltinType::SatShortFract: 13368 case BuiltinType::Fract: 13369 case BuiltinType::SatFract: 13370 case BuiltinType::LongFract: 13371 case BuiltinType::SatLongFract: 13372 case BuiltinType::UShortFract: 13373 case BuiltinType::SatUShortFract: 13374 case BuiltinType::UFract: 13375 case BuiltinType::SatUFract: 13376 case BuiltinType::ULongFract: 13377 case BuiltinType::SatULongFract: 13378 return 0; 13379 } 13380 } 13381 13382 llvm::FixedPointSemantics 13383 ASTContext::getFixedPointSemantics(QualType Ty) const { 13384 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 13385 "Can only get the fixed point semantics for a " 13386 "fixed point or integer type."); 13387 if (Ty->isIntegerType()) 13388 return llvm::FixedPointSemantics::GetIntegerSemantics( 13389 getIntWidth(Ty), Ty->isSignedIntegerType()); 13390 13391 bool isSigned = Ty->isSignedFixedPointType(); 13392 return llvm::FixedPointSemantics( 13393 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 13394 Ty->isSaturatedFixedPointType(), 13395 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 13396 } 13397 13398 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 13399 assert(Ty->isFixedPointType()); 13400 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 13401 } 13402 13403 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 13404 assert(Ty->isFixedPointType()); 13405 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 13406 } 13407 13408 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 13409 assert(Ty->isUnsignedFixedPointType() && 13410 "Expected unsigned fixed point type"); 13411 13412 switch (Ty->castAs<BuiltinType>()->getKind()) { 13413 case BuiltinType::UShortAccum: 13414 return ShortAccumTy; 13415 case BuiltinType::UAccum: 13416 return AccumTy; 13417 case BuiltinType::ULongAccum: 13418 return LongAccumTy; 13419 case BuiltinType::SatUShortAccum: 13420 return SatShortAccumTy; 13421 case BuiltinType::SatUAccum: 13422 return SatAccumTy; 13423 case BuiltinType::SatULongAccum: 13424 return SatLongAccumTy; 13425 case BuiltinType::UShortFract: 13426 return ShortFractTy; 13427 case BuiltinType::UFract: 13428 return FractTy; 13429 case BuiltinType::ULongFract: 13430 return LongFractTy; 13431 case BuiltinType::SatUShortFract: 13432 return SatShortFractTy; 13433 case BuiltinType::SatUFract: 13434 
return SatFractTy; 13435 case BuiltinType::SatULongFract: 13436 return SatLongFractTy; 13437 default: 13438 llvm_unreachable("Unexpected unsigned fixed point type"); 13439 } 13440 } 13441 13442 std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs( 13443 const TargetVersionAttr *TV) const { 13444 assert(TV != nullptr); 13445 llvm::SmallVector<StringRef, 8> Feats; 13446 std::vector<std::string> ResFeats; 13447 TV->getFeatures(Feats); 13448 for (auto &Feature : Feats) 13449 if (Target->validateCpuSupports(Feature.str())) 13450 // Use '?' to mark features that came from TargetVersion. 13451 ResFeats.push_back("?" + Feature.str()); 13452 return ResFeats; 13453 } 13454 13455 ParsedTargetAttr 13456 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 13457 assert(TD != nullptr); 13458 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr()); 13459 13460 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 13461 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 13462 }); 13463 return ParsedAttr; 13464 } 13465 13466 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13467 const FunctionDecl *FD) const { 13468 if (FD) 13469 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 13470 else 13471 Target->initFeatureMap(FeatureMap, getDiagnostics(), 13472 Target->getTargetOpts().CPU, 13473 Target->getTargetOpts().Features); 13474 } 13475 13476 // Fills in the supplied string map with the set of target features for the 13477 // passed in function. 13478 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13479 GlobalDecl GD) const { 13480 StringRef TargetCPU = Target->getTargetOpts().CPU; 13481 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 13482 if (const auto *TD = FD->getAttr<TargetAttr>()) { 13483 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 13484 13485 // Make a copy of the features as passed on the command line into the 13486 // beginning of the additional features from the function to override. 13487 ParsedAttr.Features.insert( 13488 ParsedAttr.Features.begin(), 13489 Target->getTargetOpts().FeaturesAsWritten.begin(), 13490 Target->getTargetOpts().FeaturesAsWritten.end()); 13491 13492 if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) 13493 TargetCPU = ParsedAttr.CPU; 13494 13495 // Now populate the feature map, first with the TargetCPU which is either 13496 // the default or a new one from the target attribute string. Then we'll use 13497 // the passed in features (FeaturesAsWritten) along with the new ones from 13498 // the attribute. 
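// A rough illustration with a hypothetical declaration: for
//   __attribute__((target("arch=skylake,avx2"))) void f();
// compiled with '-mavx', TargetCPU becomes "skylake" and the feature list is
// the command-line features ("+avx") followed by the attribute's ("+avx2"),
// so the attribute's features take precedence when the map is populated below.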
13499 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, 13500 ParsedAttr.Features); 13501 } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) { 13502 llvm::SmallVector<StringRef, 32> FeaturesTmp; 13503 Target->getCPUSpecificCPUDispatchFeatures( 13504 SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp); 13505 std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end()); 13506 Features.insert(Features.begin(), 13507 Target->getTargetOpts().FeaturesAsWritten.begin(), 13508 Target->getTargetOpts().FeaturesAsWritten.end()); 13509 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); 13510 } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) { 13511 std::vector<std::string> Features; 13512 StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); 13513 if (Target->getTriple().isAArch64()) { 13514 // TargetClones for AArch64 13515 if (VersionStr != "default") { 13516 SmallVector<StringRef, 1> VersionFeatures; 13517 VersionStr.split(VersionFeatures, "+"); 13518 for (auto &VFeature : VersionFeatures) { 13519 VFeature = VFeature.trim(); 13520 // Use '?' to mark features that came from AArch64 TargetClones. 13521 Features.push_back((StringRef{"?"} + VFeature).str()); 13522 } 13523 } 13524 Features.insert(Features.begin(), 13525 Target->getTargetOpts().FeaturesAsWritten.begin(), 13526 Target->getTargetOpts().FeaturesAsWritten.end()); 13527 } else { 13528 if (VersionStr.starts_with("arch=")) 13529 TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1); 13530 else if (VersionStr != "default") 13531 Features.push_back((StringRef{"+"} + VersionStr).str()); 13532 } 13533 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); 13534 } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) { 13535 std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV); 13536 Feats.insert(Feats.begin(), 13537 Target->getTargetOpts().FeaturesAsWritten.begin(), 13538 Target->getTargetOpts().FeaturesAsWritten.end()); 13539 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats); 13540 } else { 13541 FeatureMap = Target->getTargetOpts().FeatureMap; 13542 } 13543 } 13544 13545 OMPTraitInfo &ASTContext::getNewOMPTraitInfo() { 13546 OMPTraitInfoVector.emplace_back(new OMPTraitInfo()); 13547 return *OMPTraitInfoVector.back(); 13548 } 13549 13550 const StreamingDiagnostic &clang:: 13551 operator<<(const StreamingDiagnostic &DB, 13552 const ASTContext::SectionInfo &Section) { 13553 if (Section.Decl) 13554 return DB << Section.Decl; 13555 return DB << "a prior #pragma section"; 13556 } 13557 13558 bool ASTContext::mayExternalize(const Decl *D) const { 13559 bool IsInternalVar = 13560 isa<VarDecl>(D) && 13561 basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal; 13562 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() && 13563 !D->getAttr<CUDADeviceAttr>()->isImplicit()) || 13564 (D->hasAttr<CUDAConstantAttr>() && 13565 !D->getAttr<CUDAConstantAttr>()->isImplicit()); 13566 // CUDA/HIP: managed variables need to be externalized since they are 13567 // emitted as declarations in IR and therefore cannot have internal linkage. 13568 // Kernels in an anonymous namespace need to be externalized to avoid duplicate symbols.
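// Two illustrative cases (hypothetical CUDA/HIP source) that end up true here:
//   static __managed__ int Counter;           // internal managed variable
//   namespace { __global__ void kern() {} }   // kernel with internal linkage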
13569 return (IsInternalVar && 13570 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 13571 (D->hasAttr<CUDAGlobalAttr>() && 13572 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 13573 GVA_Internal); 13574 } 13575 13576 bool ASTContext::shouldExternalize(const Decl *D) const { 13577 return mayExternalize(D) && 13578 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 13579 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 13580 } 13581 13582 StringRef ASTContext::getCUIDHash() const { 13583 if (!CUIDHash.empty()) 13584 return CUIDHash; 13585 if (LangOpts.CUID.empty()) 13586 return StringRef(); 13587 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 13588 return CUIDHash; 13589 } 13590
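// Illustrative usage sketch of getCommonSugaredType (hypothetical caller;
// 'SizeT' and 'ULong' are QualTypes written as 'std::size_t' and
// 'unsigned long' on a target where size_t is unsigned long):
//   QualType Common = Ctx.getCommonSugaredType(SizeT, ULong);
//   // Common is 'unsigned long': typedef sugar present on only one side is
//   // dropped, while sugar shared by both sides is preserved.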