//===- ASTContext.cpp - Context to hold long-lived AST nodes -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/ProfileList.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns The locations that are relevant when searching for Doc comments
/// related to \p D.
static SmallVector<SourceLocation, 2>
getDeclLocsForCommentSearch(const Decl *D, SourceManager &SourceMgr) {
  assert(D);

  // Users cannot attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // Users cannot attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not a definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  SmallVector<SourceLocation, 2> Locations;
  // Find declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use declaration starting location as the "declaration
  // location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  SourceLocation BaseLocation;
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) || isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    BaseLocation = D->getBeginLoc();
  else
    BaseLocation = D->getLocation();

  if (!D->getLocation().isMacroID()) {
    Locations.emplace_back(BaseLocation);
  } else {
    const auto *DeclCtx = D->getDeclContext();

    // When encountering definitions generated from a macro (that are not
    // contained by another declaration in the macro), we need to try to find
    // the comment at the location of the expansion, but if there is no comment
    // there we should retry to see if there is a comment inside the macro as
    // well. To this end we return BaseLocation first, so the expansion site is
    // checked first; the second value is the spelling location of the
    // beginning of the declaration defined inside the macro.
    if (!(DeclCtx &&
          Decl::castFromDeclContext(DeclCtx)->getLocation().isMacroID())) {
      Locations.emplace_back(SourceMgr.getExpansionLoc(BaseLocation));
    }

    // We use Decl::getBeginLoc() and not just BaseLocation here to ensure that
    // we don't refer to the macro argument location at the expansion site
    // (this can happen if the name's spelling is provided via a macro
    // argument), but always to the declaration itself.
    Locations.emplace_back(SourceMgr.getSpellingLoc(D->getBeginLoc()));
  }

  return Locations;
}

RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that the Doxygen trailing comment comes after the declaration,
      // and starts on the same line and in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract text between the comment and declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // the comment and the declaration.
  if (Text.find_last_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

  for (const auto DeclLoc : DeclLocs) {
    // If the declaration doesn't map directly to a location in a file, we
    // can't find the comment.
    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (ExternalSource && !CommentsLoaded) {
      ExternalSource->ReadComments();
      CommentsLoaded = true;
    }

    if (Comments.empty())
      continue;

    const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
    if (!File.isValid())
      continue;

    const auto CommentsInThisFile = Comments.getCommentsInFile(File);
    if (!CommentsInThisFile || CommentsInThisFile->empty())
      continue;

    if (RawComment *Comment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile))
      return Comment;
  }

  return nullptr;
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if the function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalidated.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    return CommentlessRedeclChains.lookup(CanonicalD);
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.
  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    if (DeclRawComments.count(D) > 0)
      continue;

    const auto DeclLocs = getDeclLocsForCommentSearch(D, SourceMgr);

    for (const auto DeclLoc : DeclLocs) {
      if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
        continue;

      if (RawComment *const DocComment = getRawCommentForDeclNoCacheImpl(
              D, DeclLoc, *CommentsInThisFile)) {
        cacheRawCommentForDecl(*D, *DocComment);
        comments::FullComment *FC = DocComment->parse(*this, PP, D);
        ParsedComments[D->getCanonicalDecl()] = FC;
        break;
      }
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl. This is
  // important because comments can contain references to parameter names which
  // can be different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(C.getUnconstrainedType(C.getCanonicalType(NTTP->getType()))
                        .getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    // Note that, per C++20 [temp.over.link]/6, when determining whether
    // template-parameters are equivalent, constraints are ignored.
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(
          *this, getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), /*HasTypeConstraint=*/false,
          TTP->isExpandedParameterPack()
              ? std::optional<unsigned>(TTP->getNumExpansionParameters())
              : std::nullopt);
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getUnconstrainedType(getCanonicalType(NTTP->getType()));
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      CanonParams.push_back(Param);
    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  TemplateTemplateParmDecl *CanonTTP = TemplateTemplateParmDecl::Create(
      *this, getTranslationUnitDecl(), SourceLocation(), TTP->getDepth(),
      TTP->getPosition(), TTP->isParameterPack(), nullptr,
      TemplateParameterList::Create(*this, SourceLocation(), SourceLocation(),
                                    CanonParams, SourceLocation(),
                                    /*RequiresClause=*/nullptr));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.value_or(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
      DependentSizedArrayTypes(this_()), DependentSizedExtVectorTypes(this_()),
      DependentAddressSpaceTypes(this_()), DependentVectorTypes(this_()),
      DependentSizedMatrixTypes(this_()),
      FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
      DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
                      const ASTRecordLayout*>::iterator
           I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
           I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
           AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << " " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                      \
  if (counts[Idx])                                              \
    llvm::errs() << " " << counts[Idx] << " " << #Name          \
                 << " types, " << sizeof(Name##Type) << " each " \
                 << "(" << counts[Idx] * sizeof(Name##Type)     \
                 << " bytes)\n";                                \
  TotalBytes += counts[Idx] * sizeof(Name##Type);               \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return std::nullopt;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return std::nullopt;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

void ASTContext::setCurrentNamedModule(Module *M) {
  assert(M->isNamedModule());
  assert(!CurrentCXXNamedModule &&
         "We should only set the named module for the ASTContext once");
  CurrentCXXNamedModule = M;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
  auto *Ty = new (*this, alignof(BuiltinType)) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  if (Target.getTriple().isWasm() && Target.hasFeature("reference-types")) {
#define WASM_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/WebAssemblyReferenceTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                           SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                                SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  return InstantiatedFromUsingDecl.lookup(UUD);
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  return InstantiatedFromUsingEnumDecl.lookup(UUD);
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  return InstantiatedFromUsingShadowDecl.lookup(Inst);
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  return InstantiatedFromUnnamedFieldDecl.lookup(Field);
}

void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
                                                     FieldDecl *Tmpl) {
  assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
  assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
  assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
         "Already noted what unnamed field was instantiated from");

  InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).begin();
}

ASTContext::overridden_cxx_method_iterator
ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
  return overridden_methods(Method).end();
}

unsigned
ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
  auto Range = overridden_methods(Method);
  return Range.end() - Range.begin();
}

ASTContext::overridden_method_range
ASTContext::overridden_methods(const CXXMethodDecl *Method) const {
  llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos =
      OverriddenMethods.find(Method->getCanonicalDecl());
  if (Pos == OverriddenMethods.end())
    return overridden_method_range(nullptr, nullptr);
  return overridden_method_range(Pos->second.begin(), Pos->second.end());
}

void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                     const CXXMethodDecl *Overridden) {
  assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl());
  OverriddenMethods[Method].push_back(Overridden);
}

void ASTContext::getOverriddenMethods(
    const NamedDecl *D,
    SmallVectorImpl<const NamedDecl *> &Overridden) const {
  assert(D);

  if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) {
    Overridden.append(overridden_methods_begin(CXXMethod),
                      overridden_methods_end(CXXMethod));
    return;
  }

  const auto *Method = dyn_cast<ObjCMethodDecl>(D);
  if (!Method)
    return;

  SmallVector<const ObjCMethodDecl *, 8> OverDecls;
  Method->getOverriddenMethods(OverDecls);
Overridden.append(OverDecls.begin(), OverDecls.end());
1572 }
1573
1574 void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
1575 assert(!Import->getNextLocalImport() &&
1576 "Import declaration already in the chain");
1577 assert(!Import->isFromASTFile() && "Non-local import declaration");
1578 if (!FirstLocalImport) {
1579 FirstLocalImport = Import;
1580 LastLocalImport = Import;
1581 return;
1582 }
1583
1584 LastLocalImport->setNextLocalImport(Import);
1585 LastLocalImport = Import;
1586 }
1587
1588 //===----------------------------------------------------------------------===//
1589 // Type Sizing and Analysis
1590 //===----------------------------------------------------------------------===//
1591
1592 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
1593 /// scalar floating point type.
1594 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
1595 switch (T->castAs<BuiltinType>()->getKind()) {
1596 default:
1597 llvm_unreachable("Not a floating point type!");
1598 case BuiltinType::BFloat16:
1599 return Target->getBFloat16Format();
1600 case BuiltinType::Float16:
1601 return Target->getHalfFormat();
1602 case BuiltinType::Half:
1603 // For HLSL, when the native half type is disabled, half will be treated as
1604 // float.
1605 if (getLangOpts().HLSL)
1606 if (getLangOpts().NativeHalfType)
1607 return Target->getHalfFormat();
1608 else
1609 return Target->getFloatFormat();
1610 else
1611 return Target->getHalfFormat();
1612 case BuiltinType::Float: return Target->getFloatFormat();
1613 case BuiltinType::Double: return Target->getDoubleFormat();
1614 case BuiltinType::Ibm128:
1615 return Target->getIbm128Format();
1616 case BuiltinType::LongDouble:
1617 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1618 return AuxTarget->getLongDoubleFormat();
1619 return Target->getLongDoubleFormat();
1620 case BuiltinType::Float128:
1621 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice)
1622 return AuxTarget->getFloat128Format();
1623 return Target->getFloat128Format();
1624 }
1625 }
1626
1627 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
1628 unsigned Align = Target->getCharWidth();
1629
1630 const unsigned AlignFromAttr = D->getMaxAlignment();
1631 if (AlignFromAttr)
1632 Align = AlignFromAttr;
1633
1634 // __attribute__((aligned)) can increase or decrease alignment
1635 // *except* on a struct or struct member, where it only increases
1636 // alignment unless 'packed' is also specified.
1637 //
1638 // It is an error for alignas to decrease alignment, so we can
1639 // ignore that possibility; Sema should diagnose it.
1640 bool UseAlignAttrOnly;
1641 if (const FieldDecl *FD = dyn_cast<FieldDecl>(D))
1642 UseAlignAttrOnly =
1643 FD->hasAttr<PackedAttr>() || FD->getParent()->hasAttr<PackedAttr>();
1644 else
1645 UseAlignAttrOnly = AlignFromAttr != 0;
1646 // If we're using the align attribute only, just ignore everything
1647 // else about the declaration and its type.
1648 if (UseAlignAttrOnly) {
1649 // do nothing
1650 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) {
1651 QualType T = VD->getType();
1652 if (const auto *RT = T->getAs<ReferenceType>()) {
1653 if (ForAlignof)
1654 T = RT->getPointeeType();
1655 else
1656 T = getPointerType(RT->getPointeeType());
1657 }
1658 QualType BaseT = getBaseElementType(T);
1659 if (T->isFunctionType())
1660 Align = getTypeInfoImpl(T.getTypePtr()).Align;
1661 else if (!BaseT->isIncompleteType()) {
1662 // Adjust alignments of declarations with array type by the
1663 // large-array alignment on the target.
1664 if (const ArrayType *arrayType = getAsArrayType(T)) {
1665 unsigned MinWidth = Target->getLargeArrayMinWidth();
1666 if (!ForAlignof && MinWidth) {
1667 if (isa<VariableArrayType>(arrayType))
1668 Align = std::max(Align, Target->getLargeArrayAlign());
1669 else if (isa<ConstantArrayType>(arrayType) &&
1670 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
1671 Align = std::max(Align, Target->getLargeArrayAlign());
1672 }
1673 }
1674 Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
1675 if (BaseT.getQualifiers().hasUnaligned())
1676 Align = Target->getCharWidth();
1677 }
1678
1679 // Ensure minimum alignment for global variables.
1680 if (const auto *VD = dyn_cast<VarDecl>(D))
1681 if (VD->hasGlobalStorage() && !ForAlignof) {
1682 uint64_t TypeSize =
1683 !BaseT->isIncompleteType() ? getTypeSize(T.getTypePtr()) : 0;
1684 Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize));
1685 }
1686
1687 // Fields can be subject to extra alignment constraints, like if
1688 // the field is packed, the struct is packed, or the struct has
1689 // a max-field-alignment constraint (#pragma pack). So calculate
1690 // the actual alignment of the field within the struct, and then
1691 // (as we're expected to) constrain that by the alignment of the type.
1692 if (const auto *Field = dyn_cast<FieldDecl>(VD)) {
1693 const RecordDecl *Parent = Field->getParent();
1694 // We can only produce a sensible answer if the record is valid.
1695 if (!Parent->isInvalidDecl()) {
1696 const ASTRecordLayout &Layout = getASTRecordLayout(Parent);
1697
1698 // Start with the record's overall alignment.
1699 unsigned FieldAlign = toBits(Layout.getAlignment());
1700
1701 // Use the GCD of that and the offset within the record.
1702 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex());
1703 if (Offset > 0) {
1704 // Alignment is always a power of 2, so the GCD will be a power of 2,
1705 // which means we get to do this crazy thing instead of Euclid's.
1706 uint64_t LowBitOfOffset = Offset & (~Offset + 1);
1707 if (LowBitOfOffset < FieldAlign)
1708 FieldAlign = static_cast<unsigned>(LowBitOfOffset);
1709 }
1710
1711 Align = std::min(Align, FieldAlign);
1712 }
1713 }
1714 }
1715
1716 // Some targets have a hard limit on the maximum requestable alignment in
1717 // the aligned attribute for static variables.
1718 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
1719 const auto *VD = dyn_cast<VarDecl>(D);
1720 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
1721 Align = std::min(Align, MaxAlignedAttr);
1722
1723 return toCharUnitsFromBits(Align);
1724 }
1725
1726 CharUnits ASTContext::getExnObjectAlignment() const {
1727 return toCharUnitsFromBits(Target->getExnObjectAlignment());
1728 }
1729
1730 // getTypeInfoDataSizeInChars - Return the size of a type, in
1731 // chars. If the type is a record, its data size is returned.
This is 1732 // the size of the memcpy that's performed when assigning this type 1733 // using a trivial copy/move assignment operator. 1734 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1735 TypeInfoChars Info = getTypeInfoInChars(T); 1736 1737 // In C++, objects can sometimes be allocated into the tail padding 1738 // of a base-class subobject. We decide whether that's possible 1739 // during class layout, so here we can just trust the layout results. 1740 if (getLangOpts().CPlusPlus) { 1741 if (const auto *RT = T->getAs<RecordType>()) { 1742 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1743 Info.Width = layout.getDataSize(); 1744 } 1745 } 1746 1747 return Info; 1748 } 1749 1750 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1751 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1752 TypeInfoChars 1753 static getConstantArrayInfoInChars(const ASTContext &Context, 1754 const ConstantArrayType *CAT) { 1755 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1756 uint64_t Size = CAT->getSize().getZExtValue(); 1757 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1758 (uint64_t)(-1)/Size) && 1759 "Overflow in array type char size evaluation"); 1760 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1761 unsigned Align = EltInfo.Align.getQuantity(); 1762 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1763 Context.getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1764 Width = llvm::alignTo(Width, Align); 1765 return TypeInfoChars(CharUnits::fromQuantity(Width), 1766 CharUnits::fromQuantity(Align), 1767 EltInfo.AlignRequirement); 1768 } 1769 1770 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1771 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1772 return getConstantArrayInfoInChars(*this, CAT); 1773 TypeInfo Info = getTypeInfo(T); 1774 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1775 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1776 } 1777 1778 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1779 return getTypeInfoInChars(T.getTypePtr()); 1780 } 1781 1782 bool ASTContext::isPromotableIntegerType(QualType T) const { 1783 // HLSL doesn't promote all small integer types to int, it 1784 // just uses the rank-based promotion rules for all types. 1785 if (getLangOpts().HLSL) 1786 return false; 1787 1788 if (const auto *BT = T->getAs<BuiltinType>()) 1789 switch (BT->getKind()) { 1790 case BuiltinType::Bool: 1791 case BuiltinType::Char_S: 1792 case BuiltinType::Char_U: 1793 case BuiltinType::SChar: 1794 case BuiltinType::UChar: 1795 case BuiltinType::Short: 1796 case BuiltinType::UShort: 1797 case BuiltinType::WChar_S: 1798 case BuiltinType::WChar_U: 1799 case BuiltinType::Char8: 1800 case BuiltinType::Char16: 1801 case BuiltinType::Char32: 1802 return true; 1803 default: 1804 return false; 1805 } 1806 1807 // Enumerated types are promotable to their compatible integer types 1808 // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2). 
1809 if (const auto *ET = T->getAs<EnumType>()) { 1810 if (T->isDependentType() || ET->getDecl()->getPromotionType().isNull() || 1811 ET->getDecl()->isScoped()) 1812 return false; 1813 1814 return true; 1815 } 1816 1817 return false; 1818 } 1819 1820 bool ASTContext::isAlignmentRequired(const Type *T) const { 1821 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1822 } 1823 1824 bool ASTContext::isAlignmentRequired(QualType T) const { 1825 return isAlignmentRequired(T.getTypePtr()); 1826 } 1827 1828 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1829 bool NeedsPreferredAlignment) const { 1830 // An alignment on a typedef overrides anything else. 1831 if (const auto *TT = T->getAs<TypedefType>()) 1832 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1833 return Align; 1834 1835 // If we have an (array of) complete type, we're done. 1836 T = getBaseElementType(T); 1837 if (!T->isIncompleteType()) 1838 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1839 1840 // If we had an array type, its element type might be a typedef 1841 // type with an alignment attribute. 1842 if (const auto *TT = T->getAs<TypedefType>()) 1843 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1844 return Align; 1845 1846 // Otherwise, see if the declaration of the type had an attribute. 1847 if (const auto *TT = T->getAs<TagType>()) 1848 return TT->getDecl()->getMaxAlignment(); 1849 1850 return 0; 1851 } 1852 1853 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1854 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1855 if (I != MemoizedTypeInfo.end()) 1856 return I->second; 1857 1858 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1859 TypeInfo TI = getTypeInfoImpl(T); 1860 MemoizedTypeInfo[T] = TI; 1861 return TI; 1862 } 1863 1864 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1865 /// method does not work on incomplete types. 1866 /// 1867 /// FIXME: Pointers into different addr spaces could have different sizes and 1868 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1869 /// should take a QualType, &c. 1870 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1871 uint64_t Width = 0; 1872 unsigned Align = 8; 1873 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1874 LangAS AS = LangAS::Default; 1875 switch (T->getTypeClass()) { 1876 #define TYPE(Class, Base) 1877 #define ABSTRACT_TYPE(Class, Base) 1878 #define NON_CANONICAL_TYPE(Class, Base) 1879 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1880 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1881 case Type::Class: \ 1882 assert(!T->isDependentType() && "should not see dependent types here"); \ 1883 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1884 #include "clang/AST/TypeNodes.inc" 1885 llvm_unreachable("Should not see dependent types"); 1886 1887 case Type::FunctionNoProto: 1888 case Type::FunctionProto: 1889 // GCC extension: alignof(function) = 32 bits 1890 Width = 0; 1891 Align = 32; 1892 break; 1893 1894 case Type::IncompleteArray: 1895 case Type::VariableArray: 1896 case Type::ConstantArray: { 1897 // Model non-constant sized arrays as size zero, but track the alignment. 
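// Illustrative example: a ConstantArrayType such as 'int[8]' ends up below
// with Width equal to 8 * (bit width of 'int'), rounded up to the element
// alignment (the 32-bit Microsoft ABI skips that final rounding), while
// variable and incomplete arrays keep Size == 0 and contribute only their
// element alignment.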
1898 uint64_t Size = 0; 1899 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1900 Size = CAT->getSize().getZExtValue(); 1901 1902 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1903 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1904 "Overflow in array type bit size evaluation"); 1905 Width = EltInfo.Width * Size; 1906 Align = EltInfo.Align; 1907 AlignRequirement = EltInfo.AlignRequirement; 1908 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1909 getTargetInfo().getPointerWidth(LangAS::Default) == 64) 1910 Width = llvm::alignTo(Width, Align); 1911 break; 1912 } 1913 1914 case Type::ExtVector: 1915 case Type::Vector: { 1916 const auto *VT = cast<VectorType>(T); 1917 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1918 Width = VT->isExtVectorBoolType() ? VT->getNumElements() 1919 : EltInfo.Width * VT->getNumElements(); 1920 // Enforce at least byte size and alignment. 1921 Width = std::max<unsigned>(8, Width); 1922 Align = std::max<unsigned>(8, Width); 1923 1924 // If the alignment is not a power of 2, round up to the next power of 2. 1925 // This happens for non-power-of-2 length vectors. 1926 if (Align & (Align-1)) { 1927 Align = llvm::bit_ceil(Align); 1928 Width = llvm::alignTo(Width, Align); 1929 } 1930 // Adjust the alignment based on the target max. 1931 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1932 if (TargetVectorAlign && TargetVectorAlign < Align) 1933 Align = TargetVectorAlign; 1934 if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 1935 // Adjust the alignment for fixed-length SVE vectors. This is important 1936 // for non-power-of-2 vector lengths. 1937 Align = 128; 1938 else if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 1939 // Adjust the alignment for fixed-length SVE predicates. 1940 Align = 16; 1941 else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData) 1942 // Adjust the alignment for fixed-length RVV vectors. 1943 Align = std::min<unsigned>(64, Width); 1944 break; 1945 } 1946 1947 case Type::ConstantMatrix: { 1948 const auto *MT = cast<ConstantMatrixType>(T); 1949 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 1950 // The internal layout of a matrix value is implementation defined. 1951 // Initially be ABI compatible with arrays with respect to alignment and 1952 // size. 1953 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 1954 Align = ElementInfo.Align; 1955 break; 1956 } 1957 1958 case Type::Builtin: 1959 switch (cast<BuiltinType>(T)->getKind()) { 1960 default: llvm_unreachable("Unknown builtin type!"); 1961 case BuiltinType::Void: 1962 // GCC extension: alignof(void) = 8 bits. 
1963 Width = 0; 1964 Align = 8; 1965 break; 1966 case BuiltinType::Bool: 1967 Width = Target->getBoolWidth(); 1968 Align = Target->getBoolAlign(); 1969 break; 1970 case BuiltinType::Char_S: 1971 case BuiltinType::Char_U: 1972 case BuiltinType::UChar: 1973 case BuiltinType::SChar: 1974 case BuiltinType::Char8: 1975 Width = Target->getCharWidth(); 1976 Align = Target->getCharAlign(); 1977 break; 1978 case BuiltinType::WChar_S: 1979 case BuiltinType::WChar_U: 1980 Width = Target->getWCharWidth(); 1981 Align = Target->getWCharAlign(); 1982 break; 1983 case BuiltinType::Char16: 1984 Width = Target->getChar16Width(); 1985 Align = Target->getChar16Align(); 1986 break; 1987 case BuiltinType::Char32: 1988 Width = Target->getChar32Width(); 1989 Align = Target->getChar32Align(); 1990 break; 1991 case BuiltinType::UShort: 1992 case BuiltinType::Short: 1993 Width = Target->getShortWidth(); 1994 Align = Target->getShortAlign(); 1995 break; 1996 case BuiltinType::UInt: 1997 case BuiltinType::Int: 1998 Width = Target->getIntWidth(); 1999 Align = Target->getIntAlign(); 2000 break; 2001 case BuiltinType::ULong: 2002 case BuiltinType::Long: 2003 Width = Target->getLongWidth(); 2004 Align = Target->getLongAlign(); 2005 break; 2006 case BuiltinType::ULongLong: 2007 case BuiltinType::LongLong: 2008 Width = Target->getLongLongWidth(); 2009 Align = Target->getLongLongAlign(); 2010 break; 2011 case BuiltinType::Int128: 2012 case BuiltinType::UInt128: 2013 Width = 128; 2014 Align = Target->getInt128Align(); 2015 break; 2016 case BuiltinType::ShortAccum: 2017 case BuiltinType::UShortAccum: 2018 case BuiltinType::SatShortAccum: 2019 case BuiltinType::SatUShortAccum: 2020 Width = Target->getShortAccumWidth(); 2021 Align = Target->getShortAccumAlign(); 2022 break; 2023 case BuiltinType::Accum: 2024 case BuiltinType::UAccum: 2025 case BuiltinType::SatAccum: 2026 case BuiltinType::SatUAccum: 2027 Width = Target->getAccumWidth(); 2028 Align = Target->getAccumAlign(); 2029 break; 2030 case BuiltinType::LongAccum: 2031 case BuiltinType::ULongAccum: 2032 case BuiltinType::SatLongAccum: 2033 case BuiltinType::SatULongAccum: 2034 Width = Target->getLongAccumWidth(); 2035 Align = Target->getLongAccumAlign(); 2036 break; 2037 case BuiltinType::ShortFract: 2038 case BuiltinType::UShortFract: 2039 case BuiltinType::SatShortFract: 2040 case BuiltinType::SatUShortFract: 2041 Width = Target->getShortFractWidth(); 2042 Align = Target->getShortFractAlign(); 2043 break; 2044 case BuiltinType::Fract: 2045 case BuiltinType::UFract: 2046 case BuiltinType::SatFract: 2047 case BuiltinType::SatUFract: 2048 Width = Target->getFractWidth(); 2049 Align = Target->getFractAlign(); 2050 break; 2051 case BuiltinType::LongFract: 2052 case BuiltinType::ULongFract: 2053 case BuiltinType::SatLongFract: 2054 case BuiltinType::SatULongFract: 2055 Width = Target->getLongFractWidth(); 2056 Align = Target->getLongFractAlign(); 2057 break; 2058 case BuiltinType::BFloat16: 2059 if (Target->hasBFloat16Type()) { 2060 Width = Target->getBFloat16Width(); 2061 Align = Target->getBFloat16Align(); 2062 } else if ((getLangOpts().SYCLIsDevice || 2063 (getLangOpts().OpenMP && 2064 getLangOpts().OpenMPIsTargetDevice)) && 2065 AuxTarget->hasBFloat16Type()) { 2066 Width = AuxTarget->getBFloat16Width(); 2067 Align = AuxTarget->getBFloat16Align(); 2068 } 2069 break; 2070 case BuiltinType::Float16: 2071 case BuiltinType::Half: 2072 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2073 !getLangOpts().OpenMPIsTargetDevice) { 2074 Width = Target->getHalfWidth(); 2075 
Align = Target->getHalfAlign(); 2076 } else { 2077 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2078 "Expected OpenMP device compilation."); 2079 Width = AuxTarget->getHalfWidth(); 2080 Align = AuxTarget->getHalfAlign(); 2081 } 2082 break; 2083 case BuiltinType::Float: 2084 Width = Target->getFloatWidth(); 2085 Align = Target->getFloatAlign(); 2086 break; 2087 case BuiltinType::Double: 2088 Width = Target->getDoubleWidth(); 2089 Align = Target->getDoubleAlign(); 2090 break; 2091 case BuiltinType::Ibm128: 2092 Width = Target->getIbm128Width(); 2093 Align = Target->getIbm128Align(); 2094 break; 2095 case BuiltinType::LongDouble: 2096 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2097 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2098 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2099 Width = AuxTarget->getLongDoubleWidth(); 2100 Align = AuxTarget->getLongDoubleAlign(); 2101 } else { 2102 Width = Target->getLongDoubleWidth(); 2103 Align = Target->getLongDoubleAlign(); 2104 } 2105 break; 2106 case BuiltinType::Float128: 2107 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2108 !getLangOpts().OpenMPIsTargetDevice) { 2109 Width = Target->getFloat128Width(); 2110 Align = Target->getFloat128Align(); 2111 } else { 2112 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsTargetDevice && 2113 "Expected OpenMP device compilation."); 2114 Width = AuxTarget->getFloat128Width(); 2115 Align = AuxTarget->getFloat128Align(); 2116 } 2117 break; 2118 case BuiltinType::NullPtr: 2119 // C++ 3.9.1p11: sizeof(nullptr_t) == sizeof(void*) 2120 Width = Target->getPointerWidth(LangAS::Default); 2121 Align = Target->getPointerAlign(LangAS::Default); 2122 break; 2123 case BuiltinType::ObjCId: 2124 case BuiltinType::ObjCClass: 2125 case BuiltinType::ObjCSel: 2126 Width = Target->getPointerWidth(LangAS::Default); 2127 Align = Target->getPointerAlign(LangAS::Default); 2128 break; 2129 case BuiltinType::OCLSampler: 2130 case BuiltinType::OCLEvent: 2131 case BuiltinType::OCLClkEvent: 2132 case BuiltinType::OCLQueue: 2133 case BuiltinType::OCLReserveID: 2134 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2135 case BuiltinType::Id: 2136 #include "clang/Basic/OpenCLImageTypes.def" 2137 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2138 case BuiltinType::Id: 2139 #include "clang/Basic/OpenCLExtensionTypes.def" 2140 AS = Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 2141 Width = Target->getPointerWidth(AS); 2142 Align = Target->getPointerAlign(AS); 2143 break; 2144 // The SVE types are effectively target-specific. The length of an 2145 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2146 // of 128 bits. There is one predicate bit for each vector byte, so the 2147 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2148 // 2149 // Because the length is only known at runtime, we use a dummy value 2150 // of 0 for the static length. The alignment values are those defined 2151 // by the Procedure Call Standard for the Arm Architecture. 
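// Each case expanded from the .def includes below simply plugs in the fixed
// (Width, Align) pair described above: the scalable (sizeless) SVE, RVV and
// WebAssembly reference types report Width = 0 with only a meaningful
// alignment, while the PPC vector types have a real, fixed size.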
2152 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2153 IsSigned, IsFP, IsBF) \ 2154 case BuiltinType::Id: \ 2155 Width = 0; \ 2156 Align = 128; \ 2157 break; 2158 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2159 case BuiltinType::Id: \ 2160 Width = 0; \ 2161 Align = 16; \ 2162 break; 2163 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingletonId) \ 2164 case BuiltinType::Id: \ 2165 Width = 0; \ 2166 Align = 16; \ 2167 break; 2168 #include "clang/Basic/AArch64SVEACLETypes.def" 2169 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2170 case BuiltinType::Id: \ 2171 Width = Size; \ 2172 Align = Size; \ 2173 break; 2174 #include "clang/Basic/PPCTypes.def" 2175 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2176 IsFP, IsBF) \ 2177 case BuiltinType::Id: \ 2178 Width = 0; \ 2179 Align = ElBits; \ 2180 break; 2181 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2182 case BuiltinType::Id: \ 2183 Width = 0; \ 2184 Align = 8; \ 2185 break; 2186 #include "clang/Basic/RISCVVTypes.def" 2187 #define WASM_TYPE(Name, Id, SingletonId) \ 2188 case BuiltinType::Id: \ 2189 Width = 0; \ 2190 Align = 8; \ 2191 break; 2192 #include "clang/Basic/WebAssemblyReferenceTypes.def" 2193 } 2194 break; 2195 case Type::ObjCObjectPointer: 2196 Width = Target->getPointerWidth(LangAS::Default); 2197 Align = Target->getPointerAlign(LangAS::Default); 2198 break; 2199 case Type::BlockPointer: 2200 AS = cast<BlockPointerType>(T)->getPointeeType().getAddressSpace(); 2201 Width = Target->getPointerWidth(AS); 2202 Align = Target->getPointerAlign(AS); 2203 break; 2204 case Type::LValueReference: 2205 case Type::RValueReference: 2206 // alignof and sizeof should never enter this code path here, so we go 2207 // the pointer route. 2208 AS = cast<ReferenceType>(T)->getPointeeType().getAddressSpace(); 2209 Width = Target->getPointerWidth(AS); 2210 Align = Target->getPointerAlign(AS); 2211 break; 2212 case Type::Pointer: 2213 AS = cast<PointerType>(T)->getPointeeType().getAddressSpace(); 2214 Width = Target->getPointerWidth(AS); 2215 Align = Target->getPointerAlign(AS); 2216 break; 2217 case Type::MemberPointer: { 2218 const auto *MPT = cast<MemberPointerType>(T); 2219 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2220 Width = MPI.Width; 2221 Align = MPI.Align; 2222 break; 2223 } 2224 case Type::Complex: { 2225 // Complex types have the same alignment as their elements, but twice the 2226 // size. 
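// For instance, '_Complex double' is laid out as two consecutive doubles,
// so it gets twice the width of 'double' and the same alignment.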
2227 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2228 Width = EltInfo.Width * 2; 2229 Align = EltInfo.Align; 2230 break; 2231 } 2232 case Type::ObjCObject: 2233 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2234 case Type::Adjusted: 2235 case Type::Decayed: 2236 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2237 case Type::ObjCInterface: { 2238 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2239 if (ObjCI->getDecl()->isInvalidDecl()) { 2240 Width = 8; 2241 Align = 8; 2242 break; 2243 } 2244 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2245 Width = toBits(Layout.getSize()); 2246 Align = toBits(Layout.getAlignment()); 2247 break; 2248 } 2249 case Type::BitInt: { 2250 const auto *EIT = cast<BitIntType>(T); 2251 Align = std::clamp<unsigned>(llvm::PowerOf2Ceil(EIT->getNumBits()), 2252 getCharWidth(), Target->getLongLongAlign()); 2253 Width = llvm::alignTo(EIT->getNumBits(), Align); 2254 break; 2255 } 2256 case Type::Record: 2257 case Type::Enum: { 2258 const auto *TT = cast<TagType>(T); 2259 2260 if (TT->getDecl()->isInvalidDecl()) { 2261 Width = 8; 2262 Align = 8; 2263 break; 2264 } 2265 2266 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2267 const EnumDecl *ED = ET->getDecl(); 2268 TypeInfo Info = 2269 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2270 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2271 Info.Align = AttrAlign; 2272 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2273 } 2274 return Info; 2275 } 2276 2277 const auto *RT = cast<RecordType>(TT); 2278 const RecordDecl *RD = RT->getDecl(); 2279 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2280 Width = toBits(Layout.getSize()); 2281 Align = toBits(Layout.getAlignment()); 2282 AlignRequirement = RD->hasAttr<AlignedAttr>() 2283 ? AlignRequirementKind::RequiredByRecord 2284 : AlignRequirementKind::None; 2285 break; 2286 } 2287 2288 case Type::SubstTemplateTypeParm: 2289 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2290 getReplacementType().getTypePtr()); 2291 2292 case Type::Auto: 2293 case Type::DeducedTemplateSpecialization: { 2294 const auto *A = cast<DeducedType>(T); 2295 assert(!A->getDeducedType().isNull() && 2296 "cannot request the size of an undeduced or dependent auto type"); 2297 return getTypeInfo(A->getDeducedType().getTypePtr()); 2298 } 2299 2300 case Type::Paren: 2301 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2302 2303 case Type::MacroQualified: 2304 return getTypeInfo( 2305 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2306 2307 case Type::ObjCTypeParam: 2308 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2309 2310 case Type::Using: 2311 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2312 2313 case Type::Typedef: { 2314 const auto *TT = cast<TypedefType>(T); 2315 TypeInfo Info = getTypeInfo(TT->desugar().getTypePtr()); 2316 // If the typedef has an aligned attribute on it, it overrides any computed 2317 // alignment we have. This violates the GCC documentation (which says that 2318 // attribute(aligned) can only round up) but matches its implementation. 
if (unsigned AttrAlign = TT->getDecl()->getMaxAlignment()) {
2320 Align = AttrAlign;
2321 AlignRequirement = AlignRequirementKind::RequiredByTypedef;
2322 } else {
2323 Align = Info.Align;
2324 AlignRequirement = Info.AlignRequirement;
2325 }
2326 Width = Info.Width;
2327 break;
2328 }
2329
2330 case Type::Elaborated:
2331 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
2332
2333 case Type::Attributed:
2334 return getTypeInfo(
2335 cast<AttributedType>(T)->getEquivalentType().getTypePtr());
2336
2337 case Type::BTFTagAttributed:
2338 return getTypeInfo(
2339 cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
2340
2341 case Type::Atomic: {
2342 // Start with the base type information.
2343 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
2344 Width = Info.Width;
2345 Align = Info.Align;
2346
2347 if (!Width) {
2348 // An otherwise zero-sized type should still generate an
2349 // atomic operation.
2350 Width = Target->getCharWidth();
2351 assert(Align);
2352 } else if (Width <= Target->getMaxAtomicPromoteWidth()) {
2353 // If the size of the type doesn't exceed the platform's max
2354 // atomic promotion width, make the size and alignment more
2355 // favorable to atomic operations:
2356
2357 // Round the size up to a power of 2.
2358 Width = llvm::bit_ceil(Width);
2359
2360 // Set the alignment equal to the size.
2361 Align = static_cast<unsigned>(Width);
2362 }
2363 }
2364 break;
2365
2366 case Type::Pipe:
2367 Width = Target->getPointerWidth(LangAS::opencl_global);
2368 Align = Target->getPointerAlign(LangAS::opencl_global);
2369 break;
2370 }
2371
2372 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
2373 return TypeInfo(Width, Align, AlignRequirement);
2374 }
2375
2376 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const {
2377 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T);
2378 if (I != MemoizedUnadjustedAlign.end())
2379 return I->second;
2380
2381 unsigned UnadjustedAlign;
2382 if (const auto *RT = T->getAs<RecordType>()) {
2383 const RecordDecl *RD = RT->getDecl();
2384 const ASTRecordLayout &Layout = getASTRecordLayout(RD);
2385 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2386 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) {
2387 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
2388 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment());
2389 } else {
2390 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType());
2391 }
2392
2393 MemoizedUnadjustedAlign[T] = UnadjustedAlign;
2394 return UnadjustedAlign;
2395 }
2396
2397 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const {
2398 unsigned SimdAlign = llvm::OpenMPIRBuilder::getOpenMPDefaultSimdAlign(
2399 getTargetInfo().getTriple(), Target->getTargetOpts().FeatureMap);
2400 return SimdAlign;
2401 }
2402
2403 /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
2404 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
2405 return CharUnits::fromQuantity(BitSize / getCharWidth());
2406 }
2407
2408 /// toBits - Convert a size in characters to a size in bits.
2409 int64_t ASTContext::toBits(CharUnits CharSize) const {
2410 return CharSize.getQuantity() * getCharWidth();
2411 }
2412
2413 /// getTypeSizeInChars - Return the size of the specified type, in characters.
2414 /// This method does not work on incomplete types.
2415 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2416 return getTypeInfoInChars(T).Width; 2417 } 2418 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2419 return getTypeInfoInChars(T).Width; 2420 } 2421 2422 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2423 /// characters. This method does not work on incomplete types. 2424 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2425 return toCharUnitsFromBits(getTypeAlign(T)); 2426 } 2427 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2428 return toCharUnitsFromBits(getTypeAlign(T)); 2429 } 2430 2431 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2432 /// type, in characters, before alignment adjustments. This method does 2433 /// not work on incomplete types. 2434 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2435 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2436 } 2437 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2438 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2439 } 2440 2441 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2442 /// type for the current target in bits. This can be different than the ABI 2443 /// alignment in cases where it is beneficial for performance or backwards 2444 /// compatibility preserving to overalign a data type. (Note: despite the name, 2445 /// the preferred alignment is ABI-impacting, and not an optimization.) 2446 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { 2447 TypeInfo TI = getTypeInfo(T); 2448 unsigned ABIAlign = TI.Align; 2449 2450 T = T->getBaseElementTypeUnsafe(); 2451 2452 // The preferred alignment of member pointers is that of a pointer. 2453 if (T->isMemberPointerType()) 2454 return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); 2455 2456 if (!Target->allowsLargerPreferedTypeAlignment()) 2457 return ABIAlign; 2458 2459 if (const auto *RT = T->getAs<RecordType>()) { 2460 const RecordDecl *RD = RT->getDecl(); 2461 2462 // When used as part of a typedef, or together with a 'packed' attribute, 2463 // the 'aligned' attribute can be used to decrease alignment. Note that the 2464 // 'packed' case is already taken into consideration when computing the 2465 // alignment, we only need to handle the typedef case here. 2466 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef || 2467 RD->isInvalidDecl()) 2468 return ABIAlign; 2469 2470 unsigned PreferredAlign = static_cast<unsigned>( 2471 toBits(getASTRecordLayout(RD).PreferredAlignment)); 2472 assert(PreferredAlign >= ABIAlign && 2473 "PreferredAlign should be at least as large as ABIAlign."); 2474 return PreferredAlign; 2475 } 2476 2477 // Double (and, for targets supporting AIX `power` alignment, long double) and 2478 // long long should be naturally aligned (despite requiring less alignment) if 2479 // possible. 2480 if (const auto *CT = T->getAs<ComplexType>()) 2481 T = CT->getElementType().getTypePtr(); 2482 if (const auto *ET = T->getAs<EnumType>()) 2483 T = ET->getDecl()->getIntegerType().getTypePtr(); 2484 if (T->isSpecificBuiltinType(BuiltinType::Double) || 2485 T->isSpecificBuiltinType(BuiltinType::LongLong) || 2486 T->isSpecificBuiltinType(BuiltinType::ULongLong) || 2487 (T->isSpecificBuiltinType(BuiltinType::LongDouble) && 2488 Target->defaultsToAIXPowerAlignment())) 2489 // Don't increase the alignment if an alignment attribute was specified on a 2490 // typedef declaration. 
2491 if (!TI.isAlignRequired()) 2492 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2493 2494 return ABIAlign; 2495 } 2496 2497 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2498 /// for __attribute__((aligned)) on this target, to be used if no alignment 2499 /// value is specified. 2500 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2501 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2502 } 2503 2504 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2505 /// to a global variable of the specified type. 2506 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2507 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2508 return std::max(getPreferredTypeAlign(T), 2509 getTargetInfo().getMinGlobalAlign(TypeSize)); 2510 } 2511 2512 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2513 /// should be given to a global variable of the specified type. 2514 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2515 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2516 } 2517 2518 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2519 CharUnits Offset = CharUnits::Zero(); 2520 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2521 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2522 Offset += Layout->getBaseClassOffset(Base); 2523 Layout = &getASTRecordLayout(Base); 2524 } 2525 return Offset; 2526 } 2527 2528 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2529 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2530 CharUnits ThisAdjustment = CharUnits::Zero(); 2531 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2532 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2533 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2534 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2535 const CXXRecordDecl *Base = RD; 2536 const CXXRecordDecl *Derived = Path[I]; 2537 if (DerivedMember) 2538 std::swap(Base, Derived); 2539 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2540 RD = Path[I]; 2541 } 2542 if (DerivedMember) 2543 ThisAdjustment = -ThisAdjustment; 2544 return ThisAdjustment; 2545 } 2546 2547 /// DeepCollectObjCIvars - 2548 /// This routine first collects all declared, but not synthesized, ivars in 2549 /// super class and then collects all ivars, including those synthesized for 2550 /// current class. This routine is used for implementation of current class 2551 /// when all ivars, declared and synthesized are known. 2552 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2553 bool leafClass, 2554 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2555 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2556 DeepCollectObjCIvars(SuperClass, false, Ivars); 2557 if (!leafClass) { 2558 llvm::append_range(Ivars, OI->ivars()); 2559 } else { 2560 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2561 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2562 Iv= Iv->getNextIvar()) 2563 Ivars.push_back(Iv); 2564 } 2565 } 2566 2567 /// CollectInheritedProtocols - Collect all protocols in current class and 2568 /// those inherited by it. 
2569 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2570 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2571 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2572 // We can use protocol_iterator here instead of 2573 // all_referenced_protocol_iterator since we are walking all categories. 2574 for (auto *Proto : OI->all_referenced_protocols()) { 2575 CollectInheritedProtocols(Proto, Protocols); 2576 } 2577 2578 // Categories of this Interface. 2579 for (const auto *Cat : OI->visible_categories()) 2580 CollectInheritedProtocols(Cat, Protocols); 2581 2582 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2583 while (SD) { 2584 CollectInheritedProtocols(SD, Protocols); 2585 SD = SD->getSuperClass(); 2586 } 2587 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2588 for (auto *Proto : OC->protocols()) { 2589 CollectInheritedProtocols(Proto, Protocols); 2590 } 2591 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2592 // Insert the protocol. 2593 if (!Protocols.insert( 2594 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2595 return; 2596 2597 for (auto *Proto : OP->protocols()) 2598 CollectInheritedProtocols(Proto, Protocols); 2599 } 2600 } 2601 2602 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2603 const RecordDecl *RD, 2604 bool CheckIfTriviallyCopyable) { 2605 assert(RD->isUnion() && "Must be union type"); 2606 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2607 2608 for (const auto *Field : RD->fields()) { 2609 if (!Context.hasUniqueObjectRepresentations(Field->getType(), 2610 CheckIfTriviallyCopyable)) 2611 return false; 2612 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2613 if (FieldSize != UnionSize) 2614 return false; 2615 } 2616 return !RD->field_empty(); 2617 } 2618 2619 static int64_t getSubobjectOffset(const FieldDecl *Field, 2620 const ASTContext &Context, 2621 const clang::ASTRecordLayout & /*Layout*/) { 2622 return Context.getFieldOffset(Field); 2623 } 2624 2625 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2626 const ASTContext &Context, 2627 const clang::ASTRecordLayout &Layout) { 2628 return Context.toBits(Layout.getBaseClassOffset(RD)); 2629 } 2630 2631 static std::optional<int64_t> 2632 structHasUniqueObjectRepresentations(const ASTContext &Context, 2633 const RecordDecl *RD, 2634 bool CheckIfTriviallyCopyable); 2635 2636 static std::optional<int64_t> 2637 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context, 2638 bool CheckIfTriviallyCopyable) { 2639 if (Field->getType()->isRecordType()) { 2640 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2641 if (!RD->isUnion()) 2642 return structHasUniqueObjectRepresentations(Context, RD, 2643 CheckIfTriviallyCopyable); 2644 } 2645 2646 // A _BitInt type may not be unique if it has padding bits 2647 // but if it is a bitfield the padding bits are not used. 2648 bool IsBitIntType = Field->getType()->isBitIntType(); 2649 if (!Field->getType()->isReferenceType() && !IsBitIntType && 2650 !Context.hasUniqueObjectRepresentations(Field->getType(), 2651 CheckIfTriviallyCopyable)) 2652 return std::nullopt; 2653 2654 int64_t FieldSizeInBits = 2655 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2656 if (Field->isBitField()) { 2657 // If we have explicit padding bits, they don't contribute bits 2658 // to the actual object representation, so return 0. 
2659 if (Field->isUnnamedBitfield()) 2660 return 0; 2661 2662 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2663 if (IsBitIntType) { 2664 if ((unsigned)BitfieldSize > 2665 cast<BitIntType>(Field->getType())->getNumBits()) 2666 return std::nullopt; 2667 } else if (BitfieldSize > FieldSizeInBits) { 2668 return std::nullopt; 2669 } 2670 FieldSizeInBits = BitfieldSize; 2671 } else if (IsBitIntType && !Context.hasUniqueObjectRepresentations( 2672 Field->getType(), CheckIfTriviallyCopyable)) { 2673 return std::nullopt; 2674 } 2675 return FieldSizeInBits; 2676 } 2677 2678 static std::optional<int64_t> 2679 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context, 2680 bool CheckIfTriviallyCopyable) { 2681 return structHasUniqueObjectRepresentations(Context, RD, 2682 CheckIfTriviallyCopyable); 2683 } 2684 2685 template <typename RangeT> 2686 static std::optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2687 const RangeT &Subobjects, int64_t CurOffsetInBits, 2688 const ASTContext &Context, const clang::ASTRecordLayout &Layout, 2689 bool CheckIfTriviallyCopyable) { 2690 for (const auto *Subobject : Subobjects) { 2691 std::optional<int64_t> SizeInBits = 2692 getSubobjectSizeInBits(Subobject, Context, CheckIfTriviallyCopyable); 2693 if (!SizeInBits) 2694 return std::nullopt; 2695 if (*SizeInBits != 0) { 2696 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2697 if (Offset != CurOffsetInBits) 2698 return std::nullopt; 2699 CurOffsetInBits += *SizeInBits; 2700 } 2701 } 2702 return CurOffsetInBits; 2703 } 2704 2705 static std::optional<int64_t> 2706 structHasUniqueObjectRepresentations(const ASTContext &Context, 2707 const RecordDecl *RD, 2708 bool CheckIfTriviallyCopyable) { 2709 assert(!RD->isUnion() && "Must be struct/class type"); 2710 const auto &Layout = Context.getASTRecordLayout(RD); 2711 2712 int64_t CurOffsetInBits = 0; 2713 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2714 if (ClassDecl->isDynamicClass()) 2715 return std::nullopt; 2716 2717 SmallVector<CXXRecordDecl *, 4> Bases; 2718 for (const auto &Base : ClassDecl->bases()) { 2719 // Empty types can be inherited from, and non-empty types can potentially 2720 // have tail padding, so just make sure there isn't an error. 
2721 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2722 } 2723 2724 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2725 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2726 }); 2727 2728 std::optional<int64_t> OffsetAfterBases = 2729 structSubobjectsHaveUniqueObjectRepresentations( 2730 Bases, CurOffsetInBits, Context, Layout, CheckIfTriviallyCopyable); 2731 if (!OffsetAfterBases) 2732 return std::nullopt; 2733 CurOffsetInBits = *OffsetAfterBases; 2734 } 2735 2736 std::optional<int64_t> OffsetAfterFields = 2737 structSubobjectsHaveUniqueObjectRepresentations( 2738 RD->fields(), CurOffsetInBits, Context, Layout, 2739 CheckIfTriviallyCopyable); 2740 if (!OffsetAfterFields) 2741 return std::nullopt; 2742 CurOffsetInBits = *OffsetAfterFields; 2743 2744 return CurOffsetInBits; 2745 } 2746 2747 bool ASTContext::hasUniqueObjectRepresentations( 2748 QualType Ty, bool CheckIfTriviallyCopyable) const { 2749 // C++17 [meta.unary.prop]: 2750 // The predicate condition for a template specialization 2751 // has_unique_object_representations<T> shall be 2752 // satisfied if and only if: 2753 // (9.1) - T is trivially copyable, and 2754 // (9.2) - any two objects of type T with the same value have the same 2755 // object representation, where two objects 2756 // of array or non-union class type are considered to have the same value 2757 // if their respective sequences of 2758 // direct subobjects have the same values, and two objects of union type 2759 // are considered to have the same 2760 // value if they have the same active member and the corresponding members 2761 // have the same value. 2762 // The set of scalar types for which this condition holds is 2763 // implementation-defined. [ Note: If a type has padding 2764 // bits, the condition does not hold; otherwise, the condition holds true 2765 // for unsigned integral types. -- end note ] 2766 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2767 2768 // Arrays are unique only if their element type is unique. 2769 if (Ty->isArrayType()) 2770 return hasUniqueObjectRepresentations(getBaseElementType(Ty), 2771 CheckIfTriviallyCopyable); 2772 2773 // (9.1) - T is trivially copyable... 2774 if (CheckIfTriviallyCopyable && !Ty.isTriviallyCopyableType(*this)) 2775 return false; 2776 2777 // All integrals and enums are unique. 2778 if (Ty->isIntegralOrEnumerationType()) { 2779 // Except _BitInt types that have padding bits. 2780 if (const auto *BIT = Ty->getAs<BitIntType>()) 2781 return getTypeSize(BIT) == BIT->getNumBits(); 2782 2783 return true; 2784 } 2785 2786 // All other pointers are unique. 
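// (Member pointers are handled separately below because some C++ ABIs give
// them internal padding.)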
2787 if (Ty->isPointerType()) 2788 return true; 2789 2790 if (const auto *MPT = Ty->getAs<MemberPointerType>()) 2791 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2792 2793 if (Ty->isRecordType()) { 2794 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2795 2796 if (Record->isInvalidDecl()) 2797 return false; 2798 2799 if (Record->isUnion()) 2800 return unionHasUniqueObjectRepresentations(*this, Record, 2801 CheckIfTriviallyCopyable); 2802 2803 std::optional<int64_t> StructSize = structHasUniqueObjectRepresentations( 2804 *this, Record, CheckIfTriviallyCopyable); 2805 2806 return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty)); 2807 } 2808 2809 // FIXME: More cases to handle here (list by rsmith): 2810 // vectors (careful about, eg, vector of 3 foo) 2811 // _Complex int and friends 2812 // _Atomic T 2813 // Obj-C block pointers 2814 // Obj-C object pointers 2815 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2816 // clk_event_t, queue_t, reserve_id_t) 2817 // There're also Obj-C class types and the Obj-C selector type, but I think it 2818 // makes sense for those to return false here. 2819 2820 return false; 2821 } 2822 2823 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2824 unsigned count = 0; 2825 // Count ivars declared in class extension. 2826 for (const auto *Ext : OI->known_extensions()) 2827 count += Ext->ivar_size(); 2828 2829 // Count ivar defined in this class's implementation. This 2830 // includes synthesized ivars. 2831 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2832 count += ImplDecl->ivar_size(); 2833 2834 return count; 2835 } 2836 2837 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2838 if (!E) 2839 return false; 2840 2841 // nullptr_t is always treated as null. 2842 if (E->getType()->isNullPtrType()) return true; 2843 2844 if (E->getType()->isAnyPointerType() && 2845 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2846 Expr::NPC_ValueDependentIsNull)) 2847 return true; 2848 2849 // Unfortunately, __null has type 'int'. 2850 if (isa<GNUNullExpr>(E)) return true; 2851 2852 return false; 2853 } 2854 2855 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2856 /// exists. 2857 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2858 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2859 I = ObjCImpls.find(D); 2860 if (I != ObjCImpls.end()) 2861 return cast<ObjCImplementationDecl>(I->second); 2862 return nullptr; 2863 } 2864 2865 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2866 /// exists. 2867 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2868 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2869 I = ObjCImpls.find(D); 2870 if (I != ObjCImpls.end()) 2871 return cast<ObjCCategoryImplDecl>(I->second); 2872 return nullptr; 2873 } 2874 2875 /// Set the implementation of ObjCInterfaceDecl. 2876 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2877 ObjCImplementationDecl *ImplD) { 2878 assert(IFaceD && ImplD && "Passed null params"); 2879 ObjCImpls[IFaceD] = ImplD; 2880 } 2881 2882 /// Set the implementation of ObjCCategoryDecl. 
2883 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2884 ObjCCategoryImplDecl *ImplD) { 2885 assert(CatD && ImplD && "Passed null params"); 2886 ObjCImpls[CatD] = ImplD; 2887 } 2888 2889 const ObjCMethodDecl * 2890 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2891 return ObjCMethodRedecls.lookup(MD); 2892 } 2893 2894 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2895 const ObjCMethodDecl *Redecl) { 2896 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2897 ObjCMethodRedecls[MD] = Redecl; 2898 } 2899 2900 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2901 const NamedDecl *ND) const { 2902 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2903 return ID; 2904 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2905 return CD->getClassInterface(); 2906 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2907 return IMD->getClassInterface(); 2908 2909 return nullptr; 2910 } 2911 2912 /// Get the copy initialization expression of VarDecl, or nullptr if 2913 /// none exists. 2914 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2915 assert(VD && "Passed null params"); 2916 assert(VD->hasAttr<BlocksAttr>() && 2917 "getBlockVarCopyInits - not __block var"); 2918 auto I = BlockVarCopyInits.find(VD); 2919 if (I != BlockVarCopyInits.end()) 2920 return I->second; 2921 return {nullptr, false}; 2922 } 2923 2924 /// Set the copy initialization expression of a block var decl. 2925 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2926 bool CanThrow) { 2927 assert(VD && CopyExpr && "Passed null params"); 2928 assert(VD->hasAttr<BlocksAttr>() && 2929 "setBlockVarCopyInits - not __block var"); 2930 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2931 } 2932 2933 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2934 unsigned DataSize) const { 2935 if (!DataSize) 2936 DataSize = TypeLoc::getFullDataSizeForType(T); 2937 else 2938 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2939 "incorrect data size provided to CreateTypeSourceInfo!"); 2940 2941 auto *TInfo = 2942 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2943 new (TInfo) TypeSourceInfo(T, DataSize); 2944 return TInfo; 2945 } 2946 2947 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2948 SourceLocation L) const { 2949 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2950 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2951 return DI; 2952 } 2953 2954 const ASTRecordLayout & 2955 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2956 return getObjCLayout(D, nullptr); 2957 } 2958 2959 const ASTRecordLayout & 2960 ASTContext::getASTObjCImplementationLayout( 2961 const ObjCImplementationDecl *D) const { 2962 return getObjCLayout(D->getClassInterface(), D); 2963 } 2964 2965 static auto getCanonicalTemplateArguments(const ASTContext &C, 2966 ArrayRef<TemplateArgument> Args, 2967 bool &AnyNonCanonArgs) { 2968 SmallVector<TemplateArgument, 16> CanonArgs(Args); 2969 for (auto &Arg : CanonArgs) { 2970 TemplateArgument OrigArg = Arg; 2971 Arg = C.getCanonicalTemplateArgument(Arg); 2972 AnyNonCanonArgs |= !Arg.structurallyEquals(OrigArg); 2973 } 2974 return CanonArgs; 2975 } 2976 2977 //===----------------------------------------------------------------------===// 2978 // Type creation/memoization methods 2979 
//===----------------------------------------------------------------------===// 2980 2981 QualType 2982 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2983 unsigned fastQuals = quals.getFastQualifiers(); 2984 quals.removeFastQualifiers(); 2985 2986 // Check if we've already instantiated this type. 2987 llvm::FoldingSetNodeID ID; 2988 ExtQuals::Profile(ID, baseType, quals); 2989 void *insertPos = nullptr; 2990 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 2991 assert(eq->getQualifiers() == quals); 2992 return QualType(eq, fastQuals); 2993 } 2994 2995 // If the base type is not canonical, make the appropriate canonical type. 2996 QualType canon; 2997 if (!baseType->isCanonicalUnqualified()) { 2998 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 2999 canonSplit.Quals.addConsistentQualifiers(quals); 3000 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 3001 3002 // Re-find the insert position. 3003 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 3004 } 3005 3006 auto *eq = new (*this, alignof(ExtQuals)) ExtQuals(baseType, canon, quals); 3007 ExtQualNodes.InsertNode(eq, insertPos); 3008 return QualType(eq, fastQuals); 3009 } 3010 3011 QualType ASTContext::getAddrSpaceQualType(QualType T, 3012 LangAS AddressSpace) const { 3013 QualType CanT = getCanonicalType(T); 3014 if (CanT.getAddressSpace() == AddressSpace) 3015 return T; 3016 3017 // If we are composing extended qualifiers together, merge together 3018 // into one ExtQuals node. 3019 QualifierCollector Quals; 3020 const Type *TypeNode = Quals.strip(T); 3021 3022 // If this type already has an address space specified, it cannot get 3023 // another one. 3024 assert(!Quals.hasAddressSpace() && 3025 "Type cannot be in multiple addr spaces!"); 3026 Quals.addAddressSpace(AddressSpace); 3027 3028 return getExtQualType(TypeNode, Quals); 3029 } 3030 3031 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3032 // If the type is not qualified with an address space, just return it 3033 // immediately. 3034 if (!T.hasAddressSpace()) 3035 return T; 3036 3037 // If we are composing extended qualifiers together, merge together 3038 // into one ExtQuals node. 3039 QualifierCollector Quals; 3040 const Type *TypeNode; 3041 3042 while (T.hasAddressSpace()) { 3043 TypeNode = Quals.strip(T); 3044 3045 // If the type no longer has an address space after stripping qualifiers, 3046 // jump out. 3047 if (!QualType(TypeNode, 0).hasAddressSpace()) 3048 break; 3049 3050 // There might be sugar in the way. Strip it and try again. 3051 T = T.getSingleStepDesugaredType(*this); 3052 } 3053 3054 Quals.removeAddressSpace(); 3055 3056 // Removal of the address space can mean there are no longer any 3057 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3058 // or required. 
3059 if (Quals.hasNonFastQualifiers()) 3060 return getExtQualType(TypeNode, Quals); 3061 else 3062 return QualType(TypeNode, Quals.getFastQualifiers()); 3063 } 3064 3065 QualType ASTContext::getObjCGCQualType(QualType T, 3066 Qualifiers::GC GCAttr) const { 3067 QualType CanT = getCanonicalType(T); 3068 if (CanT.getObjCGCAttr() == GCAttr) 3069 return T; 3070 3071 if (const auto *ptr = T->getAs<PointerType>()) { 3072 QualType Pointee = ptr->getPointeeType(); 3073 if (Pointee->isAnyPointerType()) { 3074 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3075 return getPointerType(ResultType); 3076 } 3077 } 3078 3079 // If we are composing extended qualifiers together, merge together 3080 // into one ExtQuals node. 3081 QualifierCollector Quals; 3082 const Type *TypeNode = Quals.strip(T); 3083 3084 // If this type already has an ObjCGC specified, it cannot get 3085 // another one. 3086 assert(!Quals.hasObjCGCAttr() && 3087 "Type cannot have multiple ObjCGCs!"); 3088 Quals.addObjCGCAttr(GCAttr); 3089 3090 return getExtQualType(TypeNode, Quals); 3091 } 3092 3093 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3094 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3095 QualType Pointee = Ptr->getPointeeType(); 3096 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3097 return getPointerType(removeAddrSpaceQualType(Pointee)); 3098 } 3099 } 3100 return T; 3101 } 3102 3103 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3104 FunctionType::ExtInfo Info) { 3105 if (T->getExtInfo() == Info) 3106 return T; 3107 3108 QualType Result; 3109 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3110 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3111 } else { 3112 const auto *FPT = cast<FunctionProtoType>(T); 3113 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3114 EPI.ExtInfo = Info; 3115 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3116 } 3117 3118 return cast<FunctionType>(Result.getTypePtr()); 3119 } 3120 3121 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3122 QualType ResultType) { 3123 FD = FD->getMostRecentDecl(); 3124 while (true) { 3125 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3126 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3127 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3128 if (FunctionDecl *Next = FD->getPreviousDecl()) 3129 FD = Next; 3130 else 3131 break; 3132 } 3133 if (ASTMutationListener *L = getASTMutationListener()) 3134 L->DeducedReturnType(FD, ResultType); 3135 } 3136 3137 /// Get a function type and produce the equivalent function type with the 3138 /// specified exception specification. Type sugar that can be present on a 3139 /// declaration of a function with an exception specification is permitted 3140 /// and preserved. Other type sugar (for instance, typedefs) is not. 3141 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3142 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) const { 3143 // Might have some parens. 3144 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3145 return getParenType( 3146 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3147 3148 // Might be wrapped in a macro qualified type. 
3149 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3150 return getMacroQualifiedType( 3151 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3152 MQT->getMacroIdentifier()); 3153 3154 // Might have a calling-convention attribute. 3155 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3156 return getAttributedType( 3157 AT->getAttrKind(), 3158 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3159 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3160 3161 // Anything else must be a function type. Rebuild it with the new exception 3162 // specification. 3163 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3164 return getFunctionType( 3165 Proto->getReturnType(), Proto->getParamTypes(), 3166 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3167 } 3168 3169 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3170 QualType U) const { 3171 return hasSameType(T, U) || 3172 (getLangOpts().CPlusPlus17 && 3173 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3174 getFunctionTypeWithExceptionSpec(U, EST_None))); 3175 } 3176 3177 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3178 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3179 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3180 SmallVector<QualType, 16> Args(Proto->param_types().size()); 3181 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3182 Args[i] = removePtrSizeAddrSpace(Proto->param_types()[i]); 3183 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3184 } 3185 3186 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3187 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3188 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3189 } 3190 3191 return T; 3192 } 3193 3194 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3195 return hasSameType(T, U) || 3196 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3197 getFunctionTypeWithoutPtrSizes(U)); 3198 } 3199 3200 void ASTContext::adjustExceptionSpec( 3201 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3202 bool AsWritten) { 3203 // Update the type. 3204 QualType Updated = 3205 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3206 FD->setType(Updated); 3207 3208 if (!AsWritten) 3209 return; 3210 3211 // Update the type in the type source information too. 3212 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3213 // If the type and the type-as-written differ, we may need to update 3214 // the type-as-written too. 3215 if (TSInfo->getType() != FD->getType()) 3216 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3217 3218 // FIXME: When we get proper type location information for exceptions, 3219 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3220 // up the TypeSourceInfo; 3221 assert(TypeLoc::getFullDataSizeForType(Updated) == 3222 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3223 "TypeLoc size mismatch from updating exception specification"); 3224 TSInfo->overrideType(Updated); 3225 } 3226 } 3227 3228 /// getComplexType - Return the uniqued reference to the type for a complex 3229 /// number with the specified element type. 3230 QualType ASTContext::getComplexType(QualType T) const { 3231 // Unique pointers, to guarantee there is only one pointer of a particular 3232 // structure. 
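  // Note: this is the memoization pattern used for most type nodes in this
  // file: Profile() the inputs into a FoldingSetNodeID, probe the FoldingSet,
  // and only on a miss build the node (first computing its canonical form if
  // the inputs are not already canonical), then insert it at the remembered
  // position.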
3233 llvm::FoldingSetNodeID ID; 3234 ComplexType::Profile(ID, T); 3235 3236 void *InsertPos = nullptr; 3237 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3238 return QualType(CT, 0); 3239 3240 // If the pointee type isn't canonical, this won't be a canonical type either, 3241 // so fill in the canonical type field. 3242 QualType Canonical; 3243 if (!T.isCanonical()) { 3244 Canonical = getComplexType(getCanonicalType(T)); 3245 3246 // Get the new insert position for the node we care about. 3247 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3248 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3249 } 3250 auto *New = new (*this, alignof(ComplexType)) ComplexType(T, Canonical); 3251 Types.push_back(New); 3252 ComplexTypes.InsertNode(New, InsertPos); 3253 return QualType(New, 0); 3254 } 3255 3256 /// getPointerType - Return the uniqued reference to the type for a pointer to 3257 /// the specified type. 3258 QualType ASTContext::getPointerType(QualType T) const { 3259 // Unique pointers, to guarantee there is only one pointer of a particular 3260 // structure. 3261 llvm::FoldingSetNodeID ID; 3262 PointerType::Profile(ID, T); 3263 3264 void *InsertPos = nullptr; 3265 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3266 return QualType(PT, 0); 3267 3268 // If the pointee type isn't canonical, this won't be a canonical type either, 3269 // so fill in the canonical type field. 3270 QualType Canonical; 3271 if (!T.isCanonical()) { 3272 Canonical = getPointerType(getCanonicalType(T)); 3273 3274 // Get the new insert position for the node we care about. 3275 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3276 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3277 } 3278 auto *New = new (*this, alignof(PointerType)) PointerType(T, Canonical); 3279 Types.push_back(New); 3280 PointerTypes.InsertNode(New, InsertPos); 3281 return QualType(New, 0); 3282 } 3283 3284 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3285 llvm::FoldingSetNodeID ID; 3286 AdjustedType::Profile(ID, Orig, New); 3287 void *InsertPos = nullptr; 3288 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3289 if (AT) 3290 return QualType(AT, 0); 3291 3292 QualType Canonical = getCanonicalType(New); 3293 3294 // Get the new insert position for the node we care about. 3295 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3296 assert(!AT && "Shouldn't be in the map!"); 3297 3298 AT = new (*this, alignof(AdjustedType)) 3299 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3300 Types.push_back(AT); 3301 AdjustedTypes.InsertNode(AT, InsertPos); 3302 return QualType(AT, 0); 3303 } 3304 3305 QualType ASTContext::getDecayedType(QualType Orig, QualType Decayed) const { 3306 llvm::FoldingSetNodeID ID; 3307 AdjustedType::Profile(ID, Orig, Decayed); 3308 void *InsertPos = nullptr; 3309 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3310 if (AT) 3311 return QualType(AT, 0); 3312 3313 QualType Canonical = getCanonicalType(Decayed); 3314 3315 // Get the new insert position for the node we care about. 
3316 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3317 assert(!AT && "Shouldn't be in the map!"); 3318 3319 AT = new (*this, alignof(DecayedType)) DecayedType(Orig, Decayed, Canonical); 3320 Types.push_back(AT); 3321 AdjustedTypes.InsertNode(AT, InsertPos); 3322 return QualType(AT, 0); 3323 } 3324 3325 QualType ASTContext::getDecayedType(QualType T) const { 3326 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3327 3328 QualType Decayed; 3329 3330 // C99 6.7.5.3p7: 3331 // A declaration of a parameter as "array of type" shall be 3332 // adjusted to "qualified pointer to type", where the type 3333 // qualifiers (if any) are those specified within the [ and ] of 3334 // the array type derivation. 3335 if (T->isArrayType()) 3336 Decayed = getArrayDecayedType(T); 3337 3338 // C99 6.7.5.3p8: 3339 // A declaration of a parameter as "function returning type" 3340 // shall be adjusted to "pointer to function returning type", as 3341 // in 6.3.2.1. 3342 if (T->isFunctionType()) 3343 Decayed = getPointerType(T); 3344 3345 return getDecayedType(T, Decayed); 3346 } 3347 3348 /// getBlockPointerType - Return the uniqued reference to the type for 3349 /// a pointer to the specified block. 3350 QualType ASTContext::getBlockPointerType(QualType T) const { 3351 assert(T->isFunctionType() && "block of function types only"); 3352 // Unique pointers, to guarantee there is only one block of a particular 3353 // structure. 3354 llvm::FoldingSetNodeID ID; 3355 BlockPointerType::Profile(ID, T); 3356 3357 void *InsertPos = nullptr; 3358 if (BlockPointerType *PT = 3359 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3360 return QualType(PT, 0); 3361 3362 // If the block pointee type isn't canonical, this won't be a canonical 3363 // type either so fill in the canonical type field. 3364 QualType Canonical; 3365 if (!T.isCanonical()) { 3366 Canonical = getBlockPointerType(getCanonicalType(T)); 3367 3368 // Get the new insert position for the node we care about. 3369 BlockPointerType *NewIP = 3370 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3371 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3372 } 3373 auto *New = 3374 new (*this, alignof(BlockPointerType)) BlockPointerType(T, Canonical); 3375 Types.push_back(New); 3376 BlockPointerTypes.InsertNode(New, InsertPos); 3377 return QualType(New, 0); 3378 } 3379 3380 /// getLValueReferenceType - Return the uniqued reference to the type for an 3381 /// lvalue reference to the specified type. 3382 QualType 3383 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3384 assert((!T->isPlaceholderType() || 3385 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3386 "Unresolved placeholder type"); 3387 3388 // Unique pointers, to guarantee there is only one pointer of a particular 3389 // structure. 3390 llvm::FoldingSetNodeID ID; 3391 ReferenceType::Profile(ID, T, SpelledAsLValue); 3392 3393 void *InsertPos = nullptr; 3394 if (LValueReferenceType *RT = 3395 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3396 return QualType(RT, 0); 3397 3398 const auto *InnerRef = T->getAs<ReferenceType>(); 3399 3400 // If the referencee type isn't canonical, this won't be a canonical type 3401 // either, so fill in the canonical type field. 3402 QualType Canonical; 3403 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3404 QualType PointeeType = (InnerRef ? 
InnerRef->getPointeeType() : T); 3405 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3406 3407 // Get the new insert position for the node we care about. 3408 LValueReferenceType *NewIP = 3409 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3410 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3411 } 3412 3413 auto *New = new (*this, alignof(LValueReferenceType)) 3414 LValueReferenceType(T, Canonical, SpelledAsLValue); 3415 Types.push_back(New); 3416 LValueReferenceTypes.InsertNode(New, InsertPos); 3417 3418 return QualType(New, 0); 3419 } 3420 3421 /// getRValueReferenceType - Return the uniqued reference to the type for an 3422 /// rvalue reference to the specified type. 3423 QualType ASTContext::getRValueReferenceType(QualType T) const { 3424 assert((!T->isPlaceholderType() || 3425 T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) && 3426 "Unresolved placeholder type"); 3427 3428 // Unique pointers, to guarantee there is only one pointer of a particular 3429 // structure. 3430 llvm::FoldingSetNodeID ID; 3431 ReferenceType::Profile(ID, T, false); 3432 3433 void *InsertPos = nullptr; 3434 if (RValueReferenceType *RT = 3435 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3436 return QualType(RT, 0); 3437 3438 const auto *InnerRef = T->getAs<ReferenceType>(); 3439 3440 // If the referencee type isn't canonical, this won't be a canonical type 3441 // either, so fill in the canonical type field. 3442 QualType Canonical; 3443 if (InnerRef || !T.isCanonical()) { 3444 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3445 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3446 3447 // Get the new insert position for the node we care about. 3448 RValueReferenceType *NewIP = 3449 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3450 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3451 } 3452 3453 auto *New = new (*this, alignof(RValueReferenceType)) 3454 RValueReferenceType(T, Canonical); 3455 Types.push_back(New); 3456 RValueReferenceTypes.InsertNode(New, InsertPos); 3457 return QualType(New, 0); 3458 } 3459 3460 /// getMemberPointerType - Return the uniqued reference to the type for a 3461 /// member pointer to the specified type, in the specified class. 3462 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3463 // Unique pointers, to guarantee there is only one pointer of a particular 3464 // structure. 3465 llvm::FoldingSetNodeID ID; 3466 MemberPointerType::Profile(ID, T, Cls); 3467 3468 void *InsertPos = nullptr; 3469 if (MemberPointerType *PT = 3470 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3471 return QualType(PT, 0); 3472 3473 // If the pointee or class type isn't canonical, this won't be a canonical 3474 // type either, so fill in the canonical type field. 3475 QualType Canonical; 3476 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3477 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3478 3479 // Get the new insert position for the node we care about. 
3480 MemberPointerType *NewIP = 3481 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3482 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3483 } 3484 auto *New = new (*this, alignof(MemberPointerType)) 3485 MemberPointerType(T, Cls, Canonical); 3486 Types.push_back(New); 3487 MemberPointerTypes.InsertNode(New, InsertPos); 3488 return QualType(New, 0); 3489 } 3490 3491 /// getConstantArrayType - Return the unique reference to the type for an 3492 /// array of the specified element type. 3493 QualType ASTContext::getConstantArrayType(QualType EltTy, 3494 const llvm::APInt &ArySizeIn, 3495 const Expr *SizeExpr, 3496 ArraySizeModifier ASM, 3497 unsigned IndexTypeQuals) const { 3498 assert((EltTy->isDependentType() || 3499 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3500 "Constant array of VLAs is illegal!"); 3501 3502 // We only need the size as part of the type if it's instantiation-dependent. 3503 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3504 SizeExpr = nullptr; 3505 3506 // Convert the array size into a canonical width matching the pointer size for 3507 // the target. 3508 llvm::APInt ArySize(ArySizeIn); 3509 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3510 3511 llvm::FoldingSetNodeID ID; 3512 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3513 IndexTypeQuals); 3514 3515 void *InsertPos = nullptr; 3516 if (ConstantArrayType *ATP = 3517 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3518 return QualType(ATP, 0); 3519 3520 // If the element type isn't canonical or has qualifiers, or the array bound 3521 // is instantiation-dependent, this won't be a canonical type either, so fill 3522 // in the canonical type field. 3523 QualType Canon; 3524 // FIXME: Check below should look for qualifiers behind sugar. 3525 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3526 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3527 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3528 ASM, IndexTypeQuals); 3529 Canon = getQualifiedType(Canon, canonSplit.Quals); 3530 3531 // Get the new insert position for the node we care about. 3532 ConstantArrayType *NewIP = 3533 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3534 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3535 } 3536 3537 void *Mem = Allocate( 3538 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3539 alignof(ConstantArrayType)); 3540 auto *New = new (Mem) 3541 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3542 ConstantArrayTypes.InsertNode(New, InsertPos); 3543 Types.push_back(New); 3544 return QualType(New, 0); 3545 } 3546 3547 /// getVariableArrayDecayedType - Turns the given type, which may be 3548 /// variably-modified, into the corresponding type with all the known 3549 /// sizes replaced with [*]. 3550 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3551 // Vastly most common case. 3552 if (!type->isVariablyModifiedType()) return type; 3553 3554 QualType result; 3555 3556 SplitQualType split = type.getSplitDesugaredType(); 3557 const Type *ty = split.Ty; 3558 switch (ty->getTypeClass()) { 3559 #define TYPE(Class, Base) 3560 #define ABSTRACT_TYPE(Class, Base) 3561 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3562 #include "clang/AST/TypeNodes.inc" 3563 llvm_unreachable("didn't desugar past all non-canonical types?"); 3564 3565 // These types should never be variably-modified. 
3566 case Type::Builtin: 3567 case Type::Complex: 3568 case Type::Vector: 3569 case Type::DependentVector: 3570 case Type::ExtVector: 3571 case Type::DependentSizedExtVector: 3572 case Type::ConstantMatrix: 3573 case Type::DependentSizedMatrix: 3574 case Type::DependentAddressSpace: 3575 case Type::ObjCObject: 3576 case Type::ObjCInterface: 3577 case Type::ObjCObjectPointer: 3578 case Type::Record: 3579 case Type::Enum: 3580 case Type::UnresolvedUsing: 3581 case Type::TypeOfExpr: 3582 case Type::TypeOf: 3583 case Type::Decltype: 3584 case Type::UnaryTransform: 3585 case Type::DependentName: 3586 case Type::InjectedClassName: 3587 case Type::TemplateSpecialization: 3588 case Type::DependentTemplateSpecialization: 3589 case Type::TemplateTypeParm: 3590 case Type::SubstTemplateTypeParmPack: 3591 case Type::Auto: 3592 case Type::DeducedTemplateSpecialization: 3593 case Type::PackExpansion: 3594 case Type::BitInt: 3595 case Type::DependentBitInt: 3596 llvm_unreachable("type should never be variably-modified"); 3597 3598 // These types can be variably-modified but should never need to 3599 // further decay. 3600 case Type::FunctionNoProto: 3601 case Type::FunctionProto: 3602 case Type::BlockPointer: 3603 case Type::MemberPointer: 3604 case Type::Pipe: 3605 return type; 3606 3607 // These types can be variably-modified. All these modifications 3608 // preserve structure except as noted by comments. 3609 // TODO: if we ever care about optimizing VLAs, there are no-op 3610 // optimizations available here. 3611 case Type::Pointer: 3612 result = getPointerType(getVariableArrayDecayedType( 3613 cast<PointerType>(ty)->getPointeeType())); 3614 break; 3615 3616 case Type::LValueReference: { 3617 const auto *lv = cast<LValueReferenceType>(ty); 3618 result = getLValueReferenceType( 3619 getVariableArrayDecayedType(lv->getPointeeType()), 3620 lv->isSpelledAsLValue()); 3621 break; 3622 } 3623 3624 case Type::RValueReference: { 3625 const auto *lv = cast<RValueReferenceType>(ty); 3626 result = getRValueReferenceType( 3627 getVariableArrayDecayedType(lv->getPointeeType())); 3628 break; 3629 } 3630 3631 case Type::Atomic: { 3632 const auto *at = cast<AtomicType>(ty); 3633 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3634 break; 3635 } 3636 3637 case Type::ConstantArray: { 3638 const auto *cat = cast<ConstantArrayType>(ty); 3639 result = getConstantArrayType( 3640 getVariableArrayDecayedType(cat->getElementType()), 3641 cat->getSize(), 3642 cat->getSizeExpr(), 3643 cat->getSizeModifier(), 3644 cat->getIndexTypeCVRQualifiers()); 3645 break; 3646 } 3647 3648 case Type::DependentSizedArray: { 3649 const auto *dat = cast<DependentSizedArrayType>(ty); 3650 result = getDependentSizedArrayType( 3651 getVariableArrayDecayedType(dat->getElementType()), 3652 dat->getSizeExpr(), 3653 dat->getSizeModifier(), 3654 dat->getIndexTypeCVRQualifiers(), 3655 dat->getBracketsRange()); 3656 break; 3657 } 3658 3659 // Turn incomplete types into [*] types. 3660 case Type::IncompleteArray: { 3661 const auto *iat = cast<IncompleteArrayType>(ty); 3662 result = 3663 getVariableArrayType(getVariableArrayDecayedType(iat->getElementType()), 3664 /*size*/ nullptr, ArraySizeModifier::Normal, 3665 iat->getIndexTypeCVRQualifiers(), SourceRange()); 3666 break; 3667 } 3668 3669 // Turn VLA types into [*] types. 
3670 case Type::VariableArray: { 3671 const auto *vat = cast<VariableArrayType>(ty); 3672 result = getVariableArrayType( 3673 getVariableArrayDecayedType(vat->getElementType()), 3674 /*size*/ nullptr, ArraySizeModifier::Star, 3675 vat->getIndexTypeCVRQualifiers(), vat->getBracketsRange()); 3676 break; 3677 } 3678 } 3679 3680 // Apply the top-level qualifiers from the original. 3681 return getQualifiedType(result, split.Quals); 3682 } 3683 3684 /// getVariableArrayType - Returns a non-unique reference to the type for a 3685 /// variable array of the specified element type. 3686 QualType ASTContext::getVariableArrayType(QualType EltTy, Expr *NumElts, 3687 ArraySizeModifier ASM, 3688 unsigned IndexTypeQuals, 3689 SourceRange Brackets) const { 3690 // Since we don't unique expressions, it isn't possible to unique VLA's 3691 // that have an expression provided for their size. 3692 QualType Canon; 3693 3694 // Be sure to pull qualifiers off the element type. 3695 // FIXME: Check below should look for qualifiers behind sugar. 3696 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3697 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3698 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3699 IndexTypeQuals, Brackets); 3700 Canon = getQualifiedType(Canon, canonSplit.Quals); 3701 } 3702 3703 auto *New = new (*this, alignof(VariableArrayType)) 3704 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3705 3706 VariableArrayTypes.push_back(New); 3707 Types.push_back(New); 3708 return QualType(New, 0); 3709 } 3710 3711 /// getDependentSizedArrayType - Returns a non-unique reference to 3712 /// the type for a dependently-sized array of the specified element 3713 /// type. 3714 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3715 Expr *numElements, 3716 ArraySizeModifier ASM, 3717 unsigned elementTypeQuals, 3718 SourceRange brackets) const { 3719 assert((!numElements || numElements->isTypeDependent() || 3720 numElements->isValueDependent()) && 3721 "Size must be type- or value-dependent!"); 3722 3723 // Dependently-sized array types that do not have a specified number 3724 // of elements will have their sizes deduced from a dependent 3725 // initializer. We do no canonicalization here at all, which is okay 3726 // because they can't be used in most locations. 3727 if (!numElements) { 3728 auto *newType = new (*this, alignof(DependentSizedArrayType)) 3729 DependentSizedArrayType(elementType, QualType(), numElements, ASM, 3730 elementTypeQuals, brackets); 3731 Types.push_back(newType); 3732 return QualType(newType, 0); 3733 } 3734 3735 // Otherwise, we actually build a new type every time, but we 3736 // also build a canonical type. 3737 3738 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3739 3740 void *insertPos = nullptr; 3741 llvm::FoldingSetNodeID ID; 3742 DependentSizedArrayType::Profile(ID, *this, 3743 QualType(canonElementType.Ty, 0), 3744 ASM, elementTypeQuals, numElements); 3745 3746 // Look for an existing type with these properties. 3747 DependentSizedArrayType *canonTy = 3748 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3749 3750 // If we don't have one, build one. 
  if (!canonTy) {
    canonTy = new (*this, alignof(DependentSizedArrayType))
        DependentSizedArrayType(QualType(canonElementType.Ty, 0), QualType(),
                                numElements, ASM, elementTypeQuals, brackets);
    DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  // Apply qualifiers from the element type to the array.
  QualType canon = getQualifiedType(QualType(canonTy, 0),
                                    canonElementType.Quals);

  // If we didn't need extra canonicalization for the element type or the size
  // expression, then just use that as our result.
  if (QualType(canonElementType.Ty, 0) == elementType &&
      canonTy->getSizeExpr() == numElements)
    return canon;

  // Otherwise, we need to build a type which follows the spelling
  // of the element type.
  auto *sugaredType = new (*this, alignof(DependentSizedArrayType))
      DependentSizedArrayType(elementType, canon, numElements, ASM,
                              elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
          IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. We also have to pull
  // qualifiers off the element type.
  QualType canon;

  // FIXME: Check below should look for qualifiers behind sugar.
  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
3802 IncompleteArrayType *existing = 3803 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3804 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3805 } 3806 3807 auto *newType = new (*this, alignof(IncompleteArrayType)) 3808 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3809 3810 IncompleteArrayTypes.InsertNode(newType, insertPos); 3811 Types.push_back(newType); 3812 return QualType(newType, 0); 3813 } 3814 3815 ASTContext::BuiltinVectorTypeInfo 3816 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3817 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3818 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3819 NUMVECTORS}; 3820 3821 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3822 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3823 3824 switch (Ty->getKind()) { 3825 default: 3826 llvm_unreachable("Unsupported builtin vector type"); 3827 case BuiltinType::SveInt8: 3828 return SVE_INT_ELTTY(8, 16, true, 1); 3829 case BuiltinType::SveUint8: 3830 return SVE_INT_ELTTY(8, 16, false, 1); 3831 case BuiltinType::SveInt8x2: 3832 return SVE_INT_ELTTY(8, 16, true, 2); 3833 case BuiltinType::SveUint8x2: 3834 return SVE_INT_ELTTY(8, 16, false, 2); 3835 case BuiltinType::SveInt8x3: 3836 return SVE_INT_ELTTY(8, 16, true, 3); 3837 case BuiltinType::SveUint8x3: 3838 return SVE_INT_ELTTY(8, 16, false, 3); 3839 case BuiltinType::SveInt8x4: 3840 return SVE_INT_ELTTY(8, 16, true, 4); 3841 case BuiltinType::SveUint8x4: 3842 return SVE_INT_ELTTY(8, 16, false, 4); 3843 case BuiltinType::SveInt16: 3844 return SVE_INT_ELTTY(16, 8, true, 1); 3845 case BuiltinType::SveUint16: 3846 return SVE_INT_ELTTY(16, 8, false, 1); 3847 case BuiltinType::SveInt16x2: 3848 return SVE_INT_ELTTY(16, 8, true, 2); 3849 case BuiltinType::SveUint16x2: 3850 return SVE_INT_ELTTY(16, 8, false, 2); 3851 case BuiltinType::SveInt16x3: 3852 return SVE_INT_ELTTY(16, 8, true, 3); 3853 case BuiltinType::SveUint16x3: 3854 return SVE_INT_ELTTY(16, 8, false, 3); 3855 case BuiltinType::SveInt16x4: 3856 return SVE_INT_ELTTY(16, 8, true, 4); 3857 case BuiltinType::SveUint16x4: 3858 return SVE_INT_ELTTY(16, 8, false, 4); 3859 case BuiltinType::SveInt32: 3860 return SVE_INT_ELTTY(32, 4, true, 1); 3861 case BuiltinType::SveUint32: 3862 return SVE_INT_ELTTY(32, 4, false, 1); 3863 case BuiltinType::SveInt32x2: 3864 return SVE_INT_ELTTY(32, 4, true, 2); 3865 case BuiltinType::SveUint32x2: 3866 return SVE_INT_ELTTY(32, 4, false, 2); 3867 case BuiltinType::SveInt32x3: 3868 return SVE_INT_ELTTY(32, 4, true, 3); 3869 case BuiltinType::SveUint32x3: 3870 return SVE_INT_ELTTY(32, 4, false, 3); 3871 case BuiltinType::SveInt32x4: 3872 return SVE_INT_ELTTY(32, 4, true, 4); 3873 case BuiltinType::SveUint32x4: 3874 return SVE_INT_ELTTY(32, 4, false, 4); 3875 case BuiltinType::SveInt64: 3876 return SVE_INT_ELTTY(64, 2, true, 1); 3877 case BuiltinType::SveUint64: 3878 return SVE_INT_ELTTY(64, 2, false, 1); 3879 case BuiltinType::SveInt64x2: 3880 return SVE_INT_ELTTY(64, 2, true, 2); 3881 case BuiltinType::SveUint64x2: 3882 return SVE_INT_ELTTY(64, 2, false, 2); 3883 case BuiltinType::SveInt64x3: 3884 return SVE_INT_ELTTY(64, 2, true, 3); 3885 case BuiltinType::SveUint64x3: 3886 return SVE_INT_ELTTY(64, 2, false, 3); 3887 case BuiltinType::SveInt64x4: 3888 return SVE_INT_ELTTY(64, 2, true, 4); 3889 case BuiltinType::SveUint64x4: 3890 return SVE_INT_ELTTY(64, 2, false, 4); 3891 case BuiltinType::SveBool: 3892 return SVE_ELTTY(BoolTy, 16, 1); 3893 case 
BuiltinType::SveBoolx2: 3894 return SVE_ELTTY(BoolTy, 16, 2); 3895 case BuiltinType::SveBoolx4: 3896 return SVE_ELTTY(BoolTy, 16, 4); 3897 case BuiltinType::SveFloat16: 3898 return SVE_ELTTY(HalfTy, 8, 1); 3899 case BuiltinType::SveFloat16x2: 3900 return SVE_ELTTY(HalfTy, 8, 2); 3901 case BuiltinType::SveFloat16x3: 3902 return SVE_ELTTY(HalfTy, 8, 3); 3903 case BuiltinType::SveFloat16x4: 3904 return SVE_ELTTY(HalfTy, 8, 4); 3905 case BuiltinType::SveFloat32: 3906 return SVE_ELTTY(FloatTy, 4, 1); 3907 case BuiltinType::SveFloat32x2: 3908 return SVE_ELTTY(FloatTy, 4, 2); 3909 case BuiltinType::SveFloat32x3: 3910 return SVE_ELTTY(FloatTy, 4, 3); 3911 case BuiltinType::SveFloat32x4: 3912 return SVE_ELTTY(FloatTy, 4, 4); 3913 case BuiltinType::SveFloat64: 3914 return SVE_ELTTY(DoubleTy, 2, 1); 3915 case BuiltinType::SveFloat64x2: 3916 return SVE_ELTTY(DoubleTy, 2, 2); 3917 case BuiltinType::SveFloat64x3: 3918 return SVE_ELTTY(DoubleTy, 2, 3); 3919 case BuiltinType::SveFloat64x4: 3920 return SVE_ELTTY(DoubleTy, 2, 4); 3921 case BuiltinType::SveBFloat16: 3922 return SVE_ELTTY(BFloat16Ty, 8, 1); 3923 case BuiltinType::SveBFloat16x2: 3924 return SVE_ELTTY(BFloat16Ty, 8, 2); 3925 case BuiltinType::SveBFloat16x3: 3926 return SVE_ELTTY(BFloat16Ty, 8, 3); 3927 case BuiltinType::SveBFloat16x4: 3928 return SVE_ELTTY(BFloat16Ty, 8, 4); 3929 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3930 IsSigned) \ 3931 case BuiltinType::Id: \ 3932 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3933 llvm::ElementCount::getScalable(NumEls), NF}; 3934 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3935 case BuiltinType::Id: \ 3936 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3937 llvm::ElementCount::getScalable(NumEls), NF}; 3938 #define RVV_VECTOR_TYPE_BFLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3939 case BuiltinType::Id: \ 3940 return {BFloat16Ty, llvm::ElementCount::getScalable(NumEls), NF}; 3941 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3942 case BuiltinType::Id: \ 3943 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3944 #include "clang/Basic/RISCVVTypes.def" 3945 } 3946 } 3947 3948 /// getExternrefType - Return a WebAssembly externref type, which represents an 3949 /// opaque reference to a host value. 3950 QualType ASTContext::getWebAssemblyExternrefType() const { 3951 if (Target->getTriple().isWasm() && Target->hasFeature("reference-types")) { 3952 #define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \ 3953 if (BuiltinType::Id == BuiltinType::WasmExternRef) \ 3954 return SingletonId; 3955 #include "clang/Basic/WebAssemblyReferenceTypes.def" 3956 } 3957 llvm_unreachable( 3958 "shouldn't try to generate type externref outside WebAssembly target"); 3959 } 3960 3961 /// getScalableVectorType - Return the unique reference to a scalable vector 3962 /// type of the specified element type and size. VectorType must be a built-in 3963 /// type. 
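/// For example (illustrative, assuming an AArch64 target with SVE types and
/// an ASTContext `Ctx`), a scalable vector of 4 x float maps onto the
/// __SVFloat32_t builtin:
/// \code
///   QualType V = Ctx.getScalableVectorType(Ctx.FloatTy, /*NumElts=*/4,
///                                          /*NumFields=*/1);
///   // V is Ctx.SveFloat32Ty; unsupported combinations yield a null QualType.
/// \endcode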
3964 QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts, 3965 unsigned NumFields) const { 3966 if (Target->hasAArch64SVETypes()) { 3967 uint64_t EltTySize = getTypeSize(EltTy); 3968 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3969 IsSigned, IsFP, IsBF) \ 3970 if (!EltTy->isBooleanType() && \ 3971 ((EltTy->hasIntegerRepresentation() && \ 3972 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3973 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3974 IsFP && !IsBF) || \ 3975 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3976 IsBF && !IsFP)) && \ 3977 EltTySize == ElBits && NumElts == NumEls) { \ 3978 return SingletonId; \ 3979 } 3980 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3981 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3982 return SingletonId; 3983 #define SVE_OPAQUE_TYPE(Name, MangledName, Id, SingleTonId) 3984 #include "clang/Basic/AArch64SVEACLETypes.def" 3985 } else if (Target->hasRISCVVTypes()) { 3986 uint64_t EltTySize = getTypeSize(EltTy); 3987 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3988 IsFP, IsBF) \ 3989 if (!EltTy->isBooleanType() && \ 3990 ((EltTy->hasIntegerRepresentation() && \ 3991 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3992 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3993 IsFP && !IsBF) || \ 3994 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3995 IsBF && !IsFP)) && \ 3996 EltTySize == ElBits && NumElts == NumEls && NumFields == NF) \ 3997 return SingletonId; 3998 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3999 if (EltTy->isBooleanType() && NumElts == NumEls) \ 4000 return SingletonId; 4001 #include "clang/Basic/RISCVVTypes.def" 4002 } 4003 return QualType(); 4004 } 4005 4006 /// getVectorType - Return the unique reference to a vector type of 4007 /// the specified element type and size. VectorType must be a built-in type. 4008 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 4009 VectorKind VecKind) const { 4010 assert(vecType->isBuiltinType() || 4011 (vecType->isBitIntType() && 4012 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4013 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4014 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4015 4016 // Check if we've already instantiated a vector of this type. 4017 llvm::FoldingSetNodeID ID; 4018 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 4019 4020 void *InsertPos = nullptr; 4021 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4022 return QualType(VTP, 0); 4023 4024 // If the element type isn't canonical, this won't be a canonical type either, 4025 // so fill in the canonical type field. 4026 QualType Canonical; 4027 if (!vecType.isCanonical()) { 4028 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 4029 4030 // Get the new insert position for the node we care about. 
4031 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4032 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4033 } 4034 auto *New = new (*this, alignof(VectorType)) 4035 VectorType(vecType, NumElts, Canonical, VecKind); 4036 VectorTypes.InsertNode(New, InsertPos); 4037 Types.push_back(New); 4038 return QualType(New, 0); 4039 } 4040 4041 QualType ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4042 SourceLocation AttrLoc, 4043 VectorKind VecKind) const { 4044 llvm::FoldingSetNodeID ID; 4045 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4046 VecKind); 4047 void *InsertPos = nullptr; 4048 DependentVectorType *Canon = 4049 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4050 DependentVectorType *New; 4051 4052 if (Canon) { 4053 New = new (*this, alignof(DependentVectorType)) DependentVectorType( 4054 VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4055 } else { 4056 QualType CanonVecTy = getCanonicalType(VecType); 4057 if (CanonVecTy == VecType) { 4058 New = new (*this, alignof(DependentVectorType)) 4059 DependentVectorType(VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4060 4061 DependentVectorType *CanonCheck = 4062 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4063 assert(!CanonCheck && 4064 "Dependent-sized vector_size canonical type broken"); 4065 (void)CanonCheck; 4066 DependentVectorTypes.InsertNode(New, InsertPos); 4067 } else { 4068 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4069 SourceLocation(), VecKind); 4070 New = new (*this, alignof(DependentVectorType)) 4071 DependentVectorType(VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4072 } 4073 } 4074 4075 Types.push_back(New); 4076 return QualType(New, 0); 4077 } 4078 4079 /// getExtVectorType - Return the unique reference to an extended vector type of 4080 /// the specified element type and size. VectorType must be a built-in type. 4081 QualType ASTContext::getExtVectorType(QualType vecType, 4082 unsigned NumElts) const { 4083 assert(vecType->isBuiltinType() || vecType->isDependentType() || 4084 (vecType->isBitIntType() && 4085 // Only support _BitInt elements with byte-sized power of 2 NumBits. 4086 llvm::isPowerOf2_32(vecType->castAs<BitIntType>()->getNumBits()) && 4087 vecType->castAs<BitIntType>()->getNumBits() >= 8)); 4088 4089 // Check if we've already instantiated a vector of this type. 4090 llvm::FoldingSetNodeID ID; 4091 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4092 VectorKind::Generic); 4093 void *InsertPos = nullptr; 4094 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4095 return QualType(VTP, 0); 4096 4097 // If the element type isn't canonical, this won't be a canonical type either, 4098 // so fill in the canonical type field. 4099 QualType Canonical; 4100 if (!vecType.isCanonical()) { 4101 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4102 4103 // Get the new insert position for the node we care about. 
4104 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4105 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4106 } 4107 auto *New = new (*this, alignof(ExtVectorType)) 4108 ExtVectorType(vecType, NumElts, Canonical); 4109 VectorTypes.InsertNode(New, InsertPos); 4110 Types.push_back(New); 4111 return QualType(New, 0); 4112 } 4113 4114 QualType 4115 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4116 Expr *SizeExpr, 4117 SourceLocation AttrLoc) const { 4118 llvm::FoldingSetNodeID ID; 4119 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4120 SizeExpr); 4121 4122 void *InsertPos = nullptr; 4123 DependentSizedExtVectorType *Canon 4124 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4125 DependentSizedExtVectorType *New; 4126 if (Canon) { 4127 // We already have a canonical version of this array type; use it as 4128 // the canonical type for a newly-built type. 4129 New = new (*this, alignof(DependentSizedExtVectorType)) 4130 DependentSizedExtVectorType(vecType, QualType(Canon, 0), SizeExpr, 4131 AttrLoc); 4132 } else { 4133 QualType CanonVecTy = getCanonicalType(vecType); 4134 if (CanonVecTy == vecType) { 4135 New = new (*this, alignof(DependentSizedExtVectorType)) 4136 DependentSizedExtVectorType(vecType, QualType(), SizeExpr, AttrLoc); 4137 4138 DependentSizedExtVectorType *CanonCheck 4139 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4140 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4141 (void)CanonCheck; 4142 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4143 } else { 4144 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4145 SourceLocation()); 4146 New = new (*this, alignof(DependentSizedExtVectorType)) 4147 DependentSizedExtVectorType(vecType, CanonExtTy, SizeExpr, AttrLoc); 4148 } 4149 } 4150 4151 Types.push_back(New); 4152 return QualType(New, 0); 4153 } 4154 4155 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4156 unsigned NumColumns) const { 4157 llvm::FoldingSetNodeID ID; 4158 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4159 Type::ConstantMatrix); 4160 4161 assert(MatrixType::isValidElementType(ElementTy) && 4162 "need a valid element type"); 4163 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4164 ConstantMatrixType::isDimensionValid(NumColumns) && 4165 "need valid matrix dimensions"); 4166 void *InsertPos = nullptr; 4167 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4168 return QualType(MTP, 0); 4169 4170 QualType Canonical; 4171 if (!ElementTy.isCanonical()) { 4172 Canonical = 4173 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4174 4175 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4176 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4177 (void)NewIP; 4178 } 4179 4180 auto *New = new (*this, alignof(ConstantMatrixType)) 4181 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4182 MatrixTypes.InsertNode(New, InsertPos); 4183 Types.push_back(New); 4184 return QualType(New, 0); 4185 } 4186 4187 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4188 Expr *RowExpr, 4189 Expr *ColumnExpr, 4190 SourceLocation AttrLoc) const { 4191 QualType CanonElementTy = getCanonicalType(ElementTy); 4192 llvm::FoldingSetNodeID ID; 4193 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4194 ColumnExpr); 4195 
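  // Illustrative note: a dependently-sized matrix type arises from something
  // like
  //
  //   template <unsigned R, unsigned C>
  //   using mat = float __attribute__((matrix_type(R, C)));
  //
  // While R and C are still dependent this node models the type; once they
  // are known, getConstantMatrixType above produces the uniqued concrete
  // matrix type.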
  void *InsertPos = nullptr;
  DependentSizedMatrixType *Canon =
      DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, alignof(DependentSizedMatrixType))
        DependentSizedMatrixType(CanonElementTy, QualType(), RowExpr,
                                 ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // We already have a canonical version of the matrix type.
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for the newly-built type.
  DependentSizedMatrixType *New = new (*this, alignof(DependentSizedMatrixType))
      DependentSizedMatrixType(ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
      DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, alignof(DependentAddressSpaceType))
        DependentAddressSpaceType(canonPointeeType, QualType(), AddrSpaceExpr,
                                  AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType = new (*this, alignof(DependentAddressSpaceType))
      DependentAddressSpaceType(PointeeType, QualType(canonTy, 0),
                                AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
  // functionality creates a function without a prototype regardless of
  // language mode (so it makes them even in C++). Once the rewriter has been
  // fixed, this assertion can be enabled again.
  //assert(!LangOpts.requiresStrictPrototypes() &&
  //       "strict prototypes are disabled");

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
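  // Illustrative sketch (assuming `Ctx` is an ASTContext): in C89, a
  // declaration such as `int f();` carries no prototype and is modelled by
  // this node:
  //
  //   QualType FnTy = Ctx.getFunctionNoProtoType(Ctx.IntTy);
  //   assert(FnTy->isFunctionNoProtoType());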
4282 llvm::FoldingSetNodeID ID; 4283 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4284 4285 void *InsertPos = nullptr; 4286 if (FunctionNoProtoType *FT = 4287 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4288 return QualType(FT, 0); 4289 4290 QualType Canonical; 4291 if (!isCanonicalResultType(ResultTy)) { 4292 Canonical = 4293 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4294 4295 // Get the new insert position for the node we care about. 4296 FunctionNoProtoType *NewIP = 4297 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4298 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4299 } 4300 4301 auto *New = new (*this, alignof(FunctionNoProtoType)) 4302 FunctionNoProtoType(ResultTy, Canonical, Info); 4303 Types.push_back(New); 4304 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4305 return QualType(New, 0); 4306 } 4307 4308 CanQualType 4309 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4310 CanQualType CanResultType = getCanonicalType(ResultType); 4311 4312 // Canonical result types do not have ARC lifetime qualifiers. 4313 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4314 Qualifiers Qs = CanResultType.getQualifiers(); 4315 Qs.removeObjCLifetime(); 4316 return CanQualType::CreateUnsafe( 4317 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4318 } 4319 4320 return CanResultType; 4321 } 4322 4323 static bool isCanonicalExceptionSpecification( 4324 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4325 if (ESI.Type == EST_None) 4326 return true; 4327 if (!NoexceptInType) 4328 return false; 4329 4330 // C++17 onwards: exception specification is part of the type, as a simple 4331 // boolean "can this function type throw". 4332 if (ESI.Type == EST_BasicNoexcept) 4333 return true; 4334 4335 // A noexcept(expr) specification is (possibly) canonical if expr is 4336 // value-dependent. 4337 if (ESI.Type == EST_DependentNoexcept) 4338 return true; 4339 4340 // A dynamic exception specification is canonical if it only contains pack 4341 // expansions (so we can't tell whether it's non-throwing) and all its 4342 // contained types are canonical. 4343 if (ESI.Type == EST_Dynamic) { 4344 bool AnyPackExpansions = false; 4345 for (QualType ET : ESI.Exceptions) { 4346 if (!ET.isCanonical()) 4347 return false; 4348 if (ET->getAs<PackExpansionType>()) 4349 AnyPackExpansions = true; 4350 } 4351 return AnyPackExpansions; 4352 } 4353 4354 return false; 4355 } 4356 4357 QualType ASTContext::getFunctionTypeInternal( 4358 QualType ResultTy, ArrayRef<QualType> ArgArray, 4359 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4360 size_t NumArgs = ArgArray.size(); 4361 4362 // Unique functions, to guarantee there is only one function of a particular 4363 // structure. 4364 llvm::FoldingSetNodeID ID; 4365 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4366 *this, true); 4367 4368 QualType Canonical; 4369 bool Unique = false; 4370 4371 void *InsertPos = nullptr; 4372 if (FunctionProtoType *FPT = 4373 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4374 QualType Existing = QualType(FPT, 0); 4375 4376 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4377 // it so long as our exception specification doesn't contain a dependent 4378 // noexcept expression, or we're just looking for a canonical type. 4379 // Otherwise, we're going to need to create a type 4380 // sugar node to hold the concrete expression. 
4381 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4382 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4383 return Existing; 4384 4385 // We need a new type sugar node for this one, to hold the new noexcept 4386 // expression. We do no canonicalization here, but that's OK since we don't 4387 // expect to see the same noexcept expression much more than once. 4388 Canonical = getCanonicalType(Existing); 4389 Unique = true; 4390 } 4391 4392 bool NoexceptInType = getLangOpts().CPlusPlus17; 4393 bool IsCanonicalExceptionSpec = 4394 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4395 4396 // Determine whether the type being created is already canonical or not. 4397 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4398 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4399 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4400 if (!ArgArray[i].isCanonicalAsParam()) 4401 isCanonical = false; 4402 4403 if (OnlyWantCanonical) 4404 assert(isCanonical && 4405 "given non-canonical parameters constructing canonical type"); 4406 4407 // If this type isn't canonical, get the canonical version of it if we don't 4408 // already have it. The exception spec is only partially part of the 4409 // canonical type, and only in C++17 onwards. 4410 if (!isCanonical && Canonical.isNull()) { 4411 SmallVector<QualType, 16> CanonicalArgs; 4412 CanonicalArgs.reserve(NumArgs); 4413 for (unsigned i = 0; i != NumArgs; ++i) 4414 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4415 4416 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4417 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4418 CanonicalEPI.HasTrailingReturn = false; 4419 4420 if (IsCanonicalExceptionSpec) { 4421 // Exception spec is already OK. 4422 } else if (NoexceptInType) { 4423 switch (EPI.ExceptionSpec.Type) { 4424 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4425 // We don't know yet. It shouldn't matter what we pick here; no-one 4426 // should ever look at this. 4427 [[fallthrough]]; 4428 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4429 CanonicalEPI.ExceptionSpec.Type = EST_None; 4430 break; 4431 4432 // A dynamic exception specification is almost always "not noexcept", 4433 // with the exception that a pack expansion might expand to no types. 4434 case EST_Dynamic: { 4435 bool AnyPacks = false; 4436 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4437 if (ET->getAs<PackExpansionType>()) 4438 AnyPacks = true; 4439 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4440 } 4441 if (!AnyPacks) 4442 CanonicalEPI.ExceptionSpec.Type = EST_None; 4443 else { 4444 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4445 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4446 } 4447 break; 4448 } 4449 4450 case EST_DynamicNone: 4451 case EST_BasicNoexcept: 4452 case EST_NoexceptTrue: 4453 case EST_NoThrow: 4454 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4455 break; 4456 4457 case EST_DependentNoexcept: 4458 llvm_unreachable("dependent noexcept is already canonical"); 4459 } 4460 } else { 4461 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4462 } 4463 4464 // Adjust the canonical function result type. 4465 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4466 Canonical = 4467 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4468 4469 // Get the new insert position for the node we care about. 
4470 FunctionProtoType *NewIP = 4471 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4472 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4473 } 4474 4475 // Compute the needed size to hold this FunctionProtoType and the 4476 // various trailing objects. 4477 auto ESH = FunctionProtoType::getExceptionSpecSize( 4478 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4479 size_t Size = FunctionProtoType::totalSizeToAlloc< 4480 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4481 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4482 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4483 NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(), 4484 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4485 EPI.ExtParameterInfos ? NumArgs : 0, 4486 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4487 4488 auto *FTP = (FunctionProtoType *)Allocate(Size, alignof(FunctionProtoType)); 4489 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4490 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4491 Types.push_back(FTP); 4492 if (!Unique) 4493 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4494 return QualType(FTP, 0); 4495 } 4496 4497 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4498 llvm::FoldingSetNodeID ID; 4499 PipeType::Profile(ID, T, ReadOnly); 4500 4501 void *InsertPos = nullptr; 4502 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4503 return QualType(PT, 0); 4504 4505 // If the pipe element type isn't canonical, this won't be a canonical type 4506 // either, so fill in the canonical type field. 4507 QualType Canonical; 4508 if (!T.isCanonical()) { 4509 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4510 4511 // Get the new insert position for the node we care about. 4512 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4513 assert(!NewIP && "Shouldn't be in the map!"); 4514 (void)NewIP; 4515 } 4516 auto *New = new (*this, alignof(PipeType)) PipeType(T, Canonical, ReadOnly); 4517 Types.push_back(New); 4518 PipeTypes.InsertNode(New, InsertPos); 4519 return QualType(New, 0); 4520 } 4521 4522 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4523 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4524 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4525 : Ty; 4526 } 4527 4528 QualType ASTContext::getReadPipeType(QualType T) const { 4529 return getPipeType(T, true); 4530 } 4531 4532 QualType ASTContext::getWritePipeType(QualType T) const { 4533 return getPipeType(T, false); 4534 } 4535 4536 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4537 llvm::FoldingSetNodeID ID; 4538 BitIntType::Profile(ID, IsUnsigned, NumBits); 4539 4540 void *InsertPos = nullptr; 4541 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4542 return QualType(EIT, 0); 4543 4544 auto *New = new (*this, alignof(BitIntType)) BitIntType(IsUnsigned, NumBits); 4545 BitIntTypes.InsertNode(New, InsertPos); 4546 Types.push_back(New); 4547 return QualType(New, 0); 4548 } 4549 4550 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4551 Expr *NumBitsExpr) const { 4552 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4553 llvm::FoldingSetNodeID ID; 4554 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4555 4556 void *InsertPos = nullptr; 4557 if (DependentBitIntType *Existing = 4558 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4559 return QualType(Existing, 0); 4560 4561 auto *New = new (*this, alignof(DependentBitIntType)) 4562 DependentBitIntType(IsUnsigned, NumBitsExpr); 4563 DependentBitIntTypes.InsertNode(New, InsertPos); 4564 4565 Types.push_back(New); 4566 return QualType(New, 0); 4567 } 4568 4569 #ifndef NDEBUG 4570 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4571 if (!isa<CXXRecordDecl>(D)) return false; 4572 const auto *RD = cast<CXXRecordDecl>(D); 4573 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4574 return true; 4575 if (RD->getDescribedClassTemplate() && 4576 !isa<ClassTemplateSpecializationDecl>(RD)) 4577 return true; 4578 return false; 4579 } 4580 #endif 4581 4582 /// getInjectedClassNameType - Return the unique reference to the 4583 /// injected class name type for the specified templated declaration. 4584 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4585 QualType TST) const { 4586 assert(NeedsInjectedClassNameType(Decl)); 4587 if (Decl->TypeForDecl) { 4588 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4589 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4590 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4591 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4592 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4593 } else { 4594 Type *newType = new (*this, alignof(InjectedClassNameType)) 4595 InjectedClassNameType(Decl, TST); 4596 Decl->TypeForDecl = newType; 4597 Types.push_back(newType); 4598 } 4599 return QualType(Decl->TypeForDecl, 0); 4600 } 4601 4602 /// getTypeDeclType - Return the unique reference to the type for the 4603 /// specified type declaration. 
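/// The common case, where the declaration's TypeForDecl is already cached, is
/// handled inline in getTypeDeclType(); this slow path builds and caches the
/// node the first time it is needed.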
4604 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4605 assert(Decl && "Passed null for Decl param"); 4606 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4607 4608 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4609 return getTypedefType(Typedef); 4610 4611 assert(!isa<TemplateTypeParmDecl>(Decl) && 4612 "Template type parameter types are always available."); 4613 4614 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4615 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4616 assert(!NeedsInjectedClassNameType(Record)); 4617 return getRecordType(Record); 4618 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4619 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4620 return getEnumType(Enum); 4621 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4622 return getUnresolvedUsingType(Using); 4623 } else 4624 llvm_unreachable("TypeDecl without a type?"); 4625 4626 return QualType(Decl->TypeForDecl, 0); 4627 } 4628 4629 /// getTypedefType - Return the unique reference to the type for the 4630 /// specified typedef name decl. 4631 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4632 QualType Underlying) const { 4633 if (!Decl->TypeForDecl) { 4634 if (Underlying.isNull()) 4635 Underlying = Decl->getUnderlyingType(); 4636 auto *NewType = new (*this, alignof(TypedefType)) TypedefType( 4637 Type::Typedef, Decl, QualType(), getCanonicalType(Underlying)); 4638 Decl->TypeForDecl = NewType; 4639 Types.push_back(NewType); 4640 return QualType(NewType, 0); 4641 } 4642 if (Underlying.isNull() || Decl->getUnderlyingType() == Underlying) 4643 return QualType(Decl->TypeForDecl, 0); 4644 assert(hasSameType(Decl->getUnderlyingType(), Underlying)); 4645 4646 llvm::FoldingSetNodeID ID; 4647 TypedefType::Profile(ID, Decl, Underlying); 4648 4649 void *InsertPos = nullptr; 4650 if (TypedefType *T = TypedefTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4651 assert(!T->typeMatchesDecl() && 4652 "non-divergent case should be handled with TypeDecl"); 4653 return QualType(T, 0); 4654 } 4655 4656 void *Mem = Allocate(TypedefType::totalSizeToAlloc<QualType>(true), 4657 alignof(TypedefType)); 4658 auto *NewType = new (Mem) TypedefType(Type::Typedef, Decl, Underlying, 4659 getCanonicalType(Underlying)); 4660 TypedefTypes.InsertNode(NewType, InsertPos); 4661 Types.push_back(NewType); 4662 return QualType(NewType, 0); 4663 } 4664 4665 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4666 QualType Underlying) const { 4667 llvm::FoldingSetNodeID ID; 4668 UsingType::Profile(ID, Found, Underlying); 4669 4670 void *InsertPos = nullptr; 4671 if (UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos)) 4672 return QualType(T, 0); 4673 4674 const Type *TypeForDecl = 4675 cast<TypeDecl>(Found->getTargetDecl())->getTypeForDecl(); 4676 4677 assert(!Underlying.hasLocalQualifiers()); 4678 QualType Canon = Underlying->getCanonicalTypeInternal(); 4679 assert(TypeForDecl->getCanonicalTypeInternal() == Canon); 4680 4681 if (Underlying.getTypePtr() == TypeForDecl) 4682 Underlying = QualType(); 4683 void *Mem = 4684 Allocate(UsingType::totalSizeToAlloc<QualType>(!Underlying.isNull()), 4685 alignof(UsingType)); 4686 UsingType *NewType = new (Mem) UsingType(Found, Underlying, Canon); 4687 Types.push_back(NewType); 4688 UsingTypes.InsertNode(NewType, InsertPos); 4689 return QualType(NewType, 0); 4690 } 4691 4692 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 
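  // Fast path: reuse the RecordType cached on this declaration, or (just
  // below) share the node already created for a previous redeclaration.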
4693 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4694 4695 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4696 if (PrevDecl->TypeForDecl) 4697 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4698 4699 auto *newType = new (*this, alignof(RecordType)) RecordType(Decl); 4700 Decl->TypeForDecl = newType; 4701 Types.push_back(newType); 4702 return QualType(newType, 0); 4703 } 4704 4705 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4706 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4707 4708 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4709 if (PrevDecl->TypeForDecl) 4710 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4711 4712 auto *newType = new (*this, alignof(EnumType)) EnumType(Decl); 4713 Decl->TypeForDecl = newType; 4714 Types.push_back(newType); 4715 return QualType(newType, 0); 4716 } 4717 4718 QualType ASTContext::getUnresolvedUsingType( 4719 const UnresolvedUsingTypenameDecl *Decl) const { 4720 if (Decl->TypeForDecl) 4721 return QualType(Decl->TypeForDecl, 0); 4722 4723 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4724 Decl->getCanonicalDecl()) 4725 if (CanonicalDecl->TypeForDecl) 4726 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4727 4728 Type *newType = 4729 new (*this, alignof(UnresolvedUsingType)) UnresolvedUsingType(Decl); 4730 Decl->TypeForDecl = newType; 4731 Types.push_back(newType); 4732 return QualType(newType, 0); 4733 } 4734 4735 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4736 QualType modifiedType, 4737 QualType equivalentType) const { 4738 llvm::FoldingSetNodeID id; 4739 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4740 4741 void *insertPos = nullptr; 4742 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4743 if (type) return QualType(type, 0); 4744 4745 QualType canon = getCanonicalType(equivalentType); 4746 type = new (*this, alignof(AttributedType)) 4747 AttributedType(canon, attrKind, modifiedType, equivalentType); 4748 4749 Types.push_back(type); 4750 AttributedTypes.InsertNode(type, insertPos); 4751 4752 return QualType(type, 0); 4753 } 4754 4755 QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr, 4756 QualType Wrapped) { 4757 llvm::FoldingSetNodeID ID; 4758 BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr); 4759 4760 void *InsertPos = nullptr; 4761 BTFTagAttributedType *Ty = 4762 BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos); 4763 if (Ty) 4764 return QualType(Ty, 0); 4765 4766 QualType Canon = getCanonicalType(Wrapped); 4767 Ty = new (*this, alignof(BTFTagAttributedType)) 4768 BTFTagAttributedType(Canon, Wrapped, BTFAttr); 4769 4770 Types.push_back(Ty); 4771 BTFTagAttributedTypes.InsertNode(Ty, InsertPos); 4772 4773 return QualType(Ty, 0); 4774 } 4775 4776 /// Retrieve a substitution-result type. 
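/// For example, instantiating std::vector<int> conceptually records that the
/// template parameter at index 0 of the associated declaration was replaced by
/// 'int'. A hedged sketch (Ctx is an assumed ASTContext &, and VectorDecl a
/// hypothetical Decl* for the associated declaration; neither is defined in
/// this file):
/// \code
///   QualType Sugar = Ctx.getSubstTemplateTypeParmType(
///       Ctx.IntTy, VectorDecl, /*Index=*/0, /*PackIndex=*/std::nullopt);
/// \endcode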
4777 QualType ASTContext::getSubstTemplateTypeParmType( 4778 QualType Replacement, Decl *AssociatedDecl, unsigned Index, 4779 std::optional<unsigned> PackIndex) const { 4780 llvm::FoldingSetNodeID ID; 4781 SubstTemplateTypeParmType::Profile(ID, Replacement, AssociatedDecl, Index, 4782 PackIndex); 4783 void *InsertPos = nullptr; 4784 SubstTemplateTypeParmType *SubstParm = 4785 SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4786 4787 if (!SubstParm) { 4788 void *Mem = Allocate(SubstTemplateTypeParmType::totalSizeToAlloc<QualType>( 4789 !Replacement.isCanonical()), 4790 alignof(SubstTemplateTypeParmType)); 4791 SubstParm = new (Mem) SubstTemplateTypeParmType(Replacement, AssociatedDecl, 4792 Index, PackIndex); 4793 Types.push_back(SubstParm); 4794 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 4795 } 4796 4797 return QualType(SubstParm, 0); 4798 } 4799 4800 /// Retrieve a 4801 QualType 4802 ASTContext::getSubstTemplateTypeParmPackType(Decl *AssociatedDecl, 4803 unsigned Index, bool Final, 4804 const TemplateArgument &ArgPack) { 4805 #ifndef NDEBUG 4806 for (const auto &P : ArgPack.pack_elements()) 4807 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type"); 4808 #endif 4809 4810 llvm::FoldingSetNodeID ID; 4811 SubstTemplateTypeParmPackType::Profile(ID, AssociatedDecl, Index, Final, 4812 ArgPack); 4813 void *InsertPos = nullptr; 4814 if (SubstTemplateTypeParmPackType *SubstParm = 4815 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 4816 return QualType(SubstParm, 0); 4817 4818 QualType Canon; 4819 { 4820 TemplateArgument CanonArgPack = getCanonicalTemplateArgument(ArgPack); 4821 if (!AssociatedDecl->isCanonicalDecl() || 4822 !CanonArgPack.structurallyEquals(ArgPack)) { 4823 Canon = getSubstTemplateTypeParmPackType( 4824 AssociatedDecl->getCanonicalDecl(), Index, Final, CanonArgPack); 4825 [[maybe_unused]] const auto *Nothing = 4826 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 4827 assert(!Nothing); 4828 } 4829 } 4830 4831 auto *SubstParm = new (*this, alignof(SubstTemplateTypeParmPackType)) 4832 SubstTemplateTypeParmPackType(Canon, AssociatedDecl, Index, Final, 4833 ArgPack); 4834 Types.push_back(SubstParm); 4835 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 4836 return QualType(SubstParm, 0); 4837 } 4838 4839 /// Retrieve the template type parameter type for a template 4840 /// parameter or parameter pack with the given depth, index, and (optionally) 4841 /// name. 
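/// For instance, in the illustrative declaration
/// \code
///   template <typename T, typename ...Ts> struct S;
/// \endcode
/// T is depth 0 / index 0 and Ts is depth 0 / index 1 with ParameterPack set;
/// the parameters of a template nested inside S would sit at depth 1.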
4842 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4843                                              bool ParameterPack,
4844                                              TemplateTypeParmDecl *TTPDecl) const {
4845   llvm::FoldingSetNodeID ID;
4846   TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4847   void *InsertPos = nullptr;
4848   TemplateTypeParmType *TypeParm
4849     = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4850
4851   if (TypeParm)
4852     return QualType(TypeParm, 0);
4853
4854   if (TTPDecl) {
4855     QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4856     TypeParm = new (*this, alignof(TemplateTypeParmType))
4857         TemplateTypeParmType(TTPDecl, Canon);
4858
4859     TemplateTypeParmType *TypeCheck
4860       = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4861     assert(!TypeCheck && "Template type parameter canonical type broken");
4862     (void)TypeCheck;
4863   } else
4864     TypeParm = new (*this, alignof(TemplateTypeParmType))
4865         TemplateTypeParmType(Depth, Index, ParameterPack);
4866
4867   Types.push_back(TypeParm);
4868   TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4869
4870   return QualType(TypeParm, 0);
4871 }
4872
4873 TypeSourceInfo *
4874 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4875                                               SourceLocation NameLoc,
4876                                               const TemplateArgumentListInfo &Args,
4877                                               QualType Underlying) const {
4878   assert(!Name.getAsDependentTemplateName() &&
4879          "No dependent template names here!");
4880   QualType TST =
4881       getTemplateSpecializationType(Name, Args.arguments(), Underlying);
4882
4883   TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
4884   TemplateSpecializationTypeLoc TL =
4885       DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4886   TL.setTemplateKeywordLoc(SourceLocation());
4887   TL.setTemplateNameLoc(NameLoc);
4888   TL.setLAngleLoc(Args.getLAngleLoc());
4889   TL.setRAngleLoc(Args.getRAngleLoc());
4890   for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4891     TL.setArgLocInfo(i, Args[i].getLocInfo());
4892   return DI;
4893 }
4894
4895 QualType
4896 ASTContext::getTemplateSpecializationType(TemplateName Template,
4897                                           ArrayRef<TemplateArgumentLoc> Args,
4898                                           QualType Underlying) const {
4899   assert(!Template.getAsDependentTemplateName() &&
4900          "No dependent template names here!");
4901
4902   SmallVector<TemplateArgument, 4> ArgVec;
4903   ArgVec.reserve(Args.size());
4904   for (const TemplateArgumentLoc &Arg : Args)
4905     ArgVec.push_back(Arg.getArgument());
4906
4907   return getTemplateSpecializationType(Template, ArgVec, Underlying);
4908 }
4909
4910 #ifndef NDEBUG
4911 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4912   for (const TemplateArgument &Arg : Args)
4913     if (Arg.isPackExpansion())
4914       return true;
4915
4916   return false;
4917 }
4918 #endif
4919
4920 QualType
4921 ASTContext::getTemplateSpecializationType(TemplateName Template,
4922                                           ArrayRef<TemplateArgument> Args,
4923                                           QualType Underlying) const {
4924   assert(!Template.getAsDependentTemplateName() &&
4925          "No dependent template names here!");
4926   // Look through qualified template names.
4927   if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4928     Template = QTN->getUnderlyingTemplate();
4929
4930   const auto *TD = Template.getAsTemplateDecl();
4931   bool IsTypeAlias = TD && TD->isTypeAlias();
4932   QualType CanonType;
4933   if (!Underlying.isNull())
4934     CanonType = getCanonicalType(Underlying);
4935   else {
4936     // We can get here with an alias template when the specialization contains
4937     // a pack expansion that does not match up with a parameter pack.
4938 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4939 "Caller must compute aliased type"); 4940 IsTypeAlias = false; 4941 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4942 } 4943 4944 // Allocate the (non-canonical) template specialization type, but don't 4945 // try to unique it: these types typically have location information that 4946 // we don't unique and don't want to lose. 4947 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4948 sizeof(TemplateArgument) * Args.size() + 4949 (IsTypeAlias ? sizeof(QualType) : 0), 4950 alignof(TemplateSpecializationType)); 4951 auto *Spec 4952 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4953 IsTypeAlias ? Underlying : QualType()); 4954 4955 Types.push_back(Spec); 4956 return QualType(Spec, 0); 4957 } 4958 4959 QualType ASTContext::getCanonicalTemplateSpecializationType( 4960 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4961 assert(!Template.getAsDependentTemplateName() && 4962 "No dependent template names here!"); 4963 4964 // Look through qualified template names. 4965 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4966 Template = TemplateName(QTN->getUnderlyingTemplate()); 4967 4968 // Build the canonical template specialization type. 4969 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4970 bool AnyNonCanonArgs = false; 4971 auto CanonArgs = 4972 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 4973 4974 // Determine whether this canonical template specialization type already 4975 // exists. 4976 llvm::FoldingSetNodeID ID; 4977 TemplateSpecializationType::Profile(ID, CanonTemplate, 4978 CanonArgs, *this); 4979 4980 void *InsertPos = nullptr; 4981 TemplateSpecializationType *Spec 4982 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4983 4984 if (!Spec) { 4985 // Allocate a new canonical template specialization type. 
4986 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4987 sizeof(TemplateArgument) * CanonArgs.size()), 4988 alignof(TemplateSpecializationType)); 4989 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 4990 CanonArgs, 4991 QualType(), QualType()); 4992 Types.push_back(Spec); 4993 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 4994 } 4995 4996 assert(Spec->isDependentType() && 4997 "Non-dependent template-id type must have a canonical type"); 4998 return QualType(Spec, 0); 4999 } 5000 5001 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 5002 NestedNameSpecifier *NNS, 5003 QualType NamedType, 5004 TagDecl *OwnedTagDecl) const { 5005 llvm::FoldingSetNodeID ID; 5006 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 5007 5008 void *InsertPos = nullptr; 5009 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5010 if (T) 5011 return QualType(T, 0); 5012 5013 QualType Canon = NamedType; 5014 if (!Canon.isCanonical()) { 5015 Canon = getCanonicalType(NamedType); 5016 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 5017 assert(!CheckT && "Elaborated canonical type broken"); 5018 (void)CheckT; 5019 } 5020 5021 void *Mem = 5022 Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 5023 alignof(ElaboratedType)); 5024 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 5025 5026 Types.push_back(T); 5027 ElaboratedTypes.InsertNode(T, InsertPos); 5028 return QualType(T, 0); 5029 } 5030 5031 QualType 5032 ASTContext::getParenType(QualType InnerType) const { 5033 llvm::FoldingSetNodeID ID; 5034 ParenType::Profile(ID, InnerType); 5035 5036 void *InsertPos = nullptr; 5037 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5038 if (T) 5039 return QualType(T, 0); 5040 5041 QualType Canon = InnerType; 5042 if (!Canon.isCanonical()) { 5043 Canon = getCanonicalType(InnerType); 5044 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 5045 assert(!CheckT && "Paren canonical type broken"); 5046 (void)CheckT; 5047 } 5048 5049 T = new (*this, alignof(ParenType)) ParenType(InnerType, Canon); 5050 Types.push_back(T); 5051 ParenTypes.InsertNode(T, InsertPos); 5052 return QualType(T, 0); 5053 } 5054 5055 QualType 5056 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 5057 const IdentifierInfo *MacroII) const { 5058 QualType Canon = UnderlyingTy; 5059 if (!Canon.isCanonical()) 5060 Canon = getCanonicalType(UnderlyingTy); 5061 5062 auto *newType = new (*this, alignof(MacroQualifiedType)) 5063 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 5064 Types.push_back(newType); 5065 return QualType(newType, 0); 5066 } 5067 5068 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 5069 NestedNameSpecifier *NNS, 5070 const IdentifierInfo *Name, 5071 QualType Canon) const { 5072 if (Canon.isNull()) { 5073 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5074 if (CanonNNS != NNS) 5075 Canon = getDependentNameType(Keyword, CanonNNS, Name); 5076 } 5077 5078 llvm::FoldingSetNodeID ID; 5079 DependentNameType::Profile(ID, Keyword, NNS, Name); 5080 5081 void *InsertPos = nullptr; 5082 DependentNameType *T 5083 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 5084 if (T) 5085 return QualType(T, 0); 5086 5087 T = new (*this, alignof(DependentNameType)) 5088 DependentNameType(Keyword, NNS, Name, Canon); 5089 Types.push_back(T); 5090 DependentNameTypes.InsertNode(T, InsertPos); 5091 return QualType(T, 0); 
5092 } 5093 5094 QualType ASTContext::getDependentTemplateSpecializationType( 5095 ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS, 5096 const IdentifierInfo *Name, ArrayRef<TemplateArgumentLoc> Args) const { 5097 // TODO: avoid this copy 5098 SmallVector<TemplateArgument, 16> ArgCopy; 5099 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5100 ArgCopy.push_back(Args[I].getArgument()); 5101 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5102 } 5103 5104 QualType 5105 ASTContext::getDependentTemplateSpecializationType( 5106 ElaboratedTypeKeyword Keyword, 5107 NestedNameSpecifier *NNS, 5108 const IdentifierInfo *Name, 5109 ArrayRef<TemplateArgument> Args) const { 5110 assert((!NNS || NNS->isDependent()) && 5111 "nested-name-specifier must be dependent"); 5112 5113 llvm::FoldingSetNodeID ID; 5114 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5115 Name, Args); 5116 5117 void *InsertPos = nullptr; 5118 DependentTemplateSpecializationType *T 5119 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5120 if (T) 5121 return QualType(T, 0); 5122 5123 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5124 5125 ElaboratedTypeKeyword CanonKeyword = Keyword; 5126 if (Keyword == ElaboratedTypeKeyword::None) 5127 CanonKeyword = ElaboratedTypeKeyword::Typename; 5128 5129 bool AnyNonCanonArgs = false; 5130 auto CanonArgs = 5131 ::getCanonicalTemplateArguments(*this, Args, AnyNonCanonArgs); 5132 5133 QualType Canon; 5134 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5135 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5136 Name, 5137 CanonArgs); 5138 5139 // Find the insert position again. 5140 [[maybe_unused]] auto *Nothing = 5141 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5142 assert(!Nothing && "canonical type broken"); 5143 } 5144 5145 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5146 sizeof(TemplateArgument) * Args.size()), 5147 alignof(DependentTemplateSpecializationType)); 5148 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5149 Name, Args, Canon); 5150 Types.push_back(T); 5151 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5152 return QualType(T, 0); 5153 } 5154 5155 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5156 TemplateArgument Arg; 5157 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5158 QualType ArgType = getTypeDeclType(TTP); 5159 if (TTP->isParameterPack()) 5160 ArgType = getPackExpansionType(ArgType, std::nullopt); 5161 5162 Arg = TemplateArgument(ArgType); 5163 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5164 QualType T = 5165 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5166 // For class NTTPs, ensure we include the 'const' so the type matches that 5167 // of a real template argument. 5168 // FIXME: It would be more faithful to model this as something like an 5169 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
5170 if (T->isRecordType()) 5171 T.addConst(); 5172 Expr *E = new (*this) DeclRefExpr( 5173 *this, NTTP, /*RefersToEnclosingVariableOrCapture*/ false, T, 5174 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5175 5176 if (NTTP->isParameterPack()) 5177 E = new (*this) 5178 PackExpansionExpr(DependentTy, E, NTTP->getLocation(), std::nullopt); 5179 Arg = TemplateArgument(E); 5180 } else { 5181 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5182 if (TTP->isParameterPack()) 5183 Arg = TemplateArgument(TemplateName(TTP), std::optional<unsigned>()); 5184 else 5185 Arg = TemplateArgument(TemplateName(TTP)); 5186 } 5187 5188 if (Param->isTemplateParameterPack()) 5189 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5190 5191 return Arg; 5192 } 5193 5194 void 5195 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5196 SmallVectorImpl<TemplateArgument> &Args) { 5197 Args.reserve(Args.size() + Params->size()); 5198 5199 for (NamedDecl *Param : *Params) 5200 Args.push_back(getInjectedTemplateArg(Param)); 5201 } 5202 5203 QualType ASTContext::getPackExpansionType(QualType Pattern, 5204 std::optional<unsigned> NumExpansions, 5205 bool ExpectPackInType) { 5206 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5207 "Pack expansions must expand one or more parameter packs"); 5208 5209 llvm::FoldingSetNodeID ID; 5210 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5211 5212 void *InsertPos = nullptr; 5213 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5214 if (T) 5215 return QualType(T, 0); 5216 5217 QualType Canon; 5218 if (!Pattern.isCanonical()) { 5219 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5220 /*ExpectPackInType=*/false); 5221 5222 // Find the insert position again, in case we inserted an element into 5223 // PackExpansionTypes and invalidated our insert position. 5224 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5225 } 5226 5227 T = new (*this, alignof(PackExpansionType)) 5228 PackExpansionType(Pattern, Canon, NumExpansions); 5229 Types.push_back(T); 5230 PackExpansionTypes.InsertNode(T, InsertPos); 5231 return QualType(T, 0); 5232 } 5233 5234 /// CmpProtocolNames - Comparison predicate for sorting protocols 5235 /// alphabetically. 5236 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5237 ObjCProtocolDecl *const *RHS) { 5238 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5239 } 5240 5241 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5242 if (Protocols.empty()) return true; 5243 5244 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5245 return false; 5246 5247 for (unsigned i = 1; i != Protocols.size(); ++i) 5248 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5249 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5250 return false; 5251 return true; 5252 } 5253 5254 static void 5255 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5256 // Sort protocols, keyed by name. 5257 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5258 5259 // Canonicalize. 5260 for (ObjCProtocolDecl *&P : Protocols) 5261 P = P->getCanonicalDecl(); 5262 5263 // Remove duplicates. 
5264 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5265 Protocols.erase(ProtocolsEnd, Protocols.end()); 5266 } 5267 5268 QualType ASTContext::getObjCObjectType(QualType BaseType, 5269 ObjCProtocolDecl * const *Protocols, 5270 unsigned NumProtocols) const { 5271 return getObjCObjectType(BaseType, {}, 5272 llvm::ArrayRef(Protocols, NumProtocols), 5273 /*isKindOf=*/false); 5274 } 5275 5276 QualType ASTContext::getObjCObjectType( 5277 QualType baseType, 5278 ArrayRef<QualType> typeArgs, 5279 ArrayRef<ObjCProtocolDecl *> protocols, 5280 bool isKindOf) const { 5281 // If the base type is an interface and there aren't any protocols or 5282 // type arguments to add, then the interface type will do just fine. 5283 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5284 isa<ObjCInterfaceType>(baseType)) 5285 return baseType; 5286 5287 // Look in the folding set for an existing type. 5288 llvm::FoldingSetNodeID ID; 5289 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5290 void *InsertPos = nullptr; 5291 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5292 return QualType(QT, 0); 5293 5294 // Determine the type arguments to be used for canonicalization, 5295 // which may be explicitly specified here or written on the base 5296 // type. 5297 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5298 if (effectiveTypeArgs.empty()) { 5299 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5300 effectiveTypeArgs = baseObject->getTypeArgs(); 5301 } 5302 5303 // Build the canonical type, which has the canonical base type and a 5304 // sorted-and-uniqued list of protocols and the type arguments 5305 // canonicalized. 5306 QualType canonical; 5307 bool typeArgsAreCanonical = llvm::all_of( 5308 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5309 bool protocolsSorted = areSortedAndUniqued(protocols); 5310 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5311 // Determine the canonical type arguments. 5312 ArrayRef<QualType> canonTypeArgs; 5313 SmallVector<QualType, 4> canonTypeArgsVec; 5314 if (!typeArgsAreCanonical) { 5315 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5316 for (auto typeArg : effectiveTypeArgs) 5317 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5318 canonTypeArgs = canonTypeArgsVec; 5319 } else { 5320 canonTypeArgs = effectiveTypeArgs; 5321 } 5322 5323 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5324 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5325 if (!protocolsSorted) { 5326 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5327 SortAndUniqueProtocols(canonProtocolsVec); 5328 canonProtocols = canonProtocolsVec; 5329 } else { 5330 canonProtocols = protocols; 5331 } 5332 5333 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5334 canonProtocols, isKindOf); 5335 5336 // Regenerate InsertPos. 5337 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5338 } 5339 5340 unsigned size = sizeof(ObjCObjectTypeImpl); 5341 size += typeArgs.size() * sizeof(QualType); 5342 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5343 void *mem = Allocate(size, alignof(ObjCObjectTypeImpl)); 5344 auto *T = 5345 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5346 isKindOf); 5347 5348 Types.push_back(T); 5349 ObjCObjectTypes.InsertNode(T, InsertPos); 5350 return QualType(T, 0); 5351 } 5352 5353 /// Apply Objective-C protocol qualifiers to the given type. 
5354 /// If this is for the canonical type of a type parameter, we can apply 5355 /// protocol qualifiers on the ObjCObjectPointerType. 5356 QualType 5357 ASTContext::applyObjCProtocolQualifiers(QualType type, 5358 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5359 bool allowOnPointerType) const { 5360 hasError = false; 5361 5362 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5363 return getObjCTypeParamType(objT->getDecl(), protocols); 5364 } 5365 5366 // Apply protocol qualifiers to ObjCObjectPointerType. 5367 if (allowOnPointerType) { 5368 if (const auto *objPtr = 5369 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5370 const ObjCObjectType *objT = objPtr->getObjectType(); 5371 // Merge protocol lists and construct ObjCObjectType. 5372 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5373 protocolsVec.append(objT->qual_begin(), 5374 objT->qual_end()); 5375 protocolsVec.append(protocols.begin(), protocols.end()); 5376 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5377 type = getObjCObjectType( 5378 objT->getBaseType(), 5379 objT->getTypeArgsAsWritten(), 5380 protocols, 5381 objT->isKindOfTypeAsWritten()); 5382 return getObjCObjectPointerType(type); 5383 } 5384 } 5385 5386 // Apply protocol qualifiers to ObjCObjectType. 5387 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5388 // FIXME: Check for protocols to which the class type is already 5389 // known to conform. 5390 5391 return getObjCObjectType(objT->getBaseType(), 5392 objT->getTypeArgsAsWritten(), 5393 protocols, 5394 objT->isKindOfTypeAsWritten()); 5395 } 5396 5397 // If the canonical type is ObjCObjectType, ... 5398 if (type->isObjCObjectType()) { 5399 // Silently overwrite any existing protocol qualifiers. 5400 // TODO: determine whether that's the right thing to do. 5401 5402 // FIXME: Check for protocols to which the class type is already 5403 // known to conform. 5404 return getObjCObjectType(type, {}, protocols, false); 5405 } 5406 5407 // id<protocol-list> 5408 if (type->isObjCIdType()) { 5409 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5410 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5411 objPtr->isKindOfType()); 5412 return getObjCObjectPointerType(type); 5413 } 5414 5415 // Class<protocol-list> 5416 if (type->isObjCClassType()) { 5417 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5418 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5419 objPtr->isKindOfType()); 5420 return getObjCObjectPointerType(type); 5421 } 5422 5423 hasError = true; 5424 return type; 5425 } 5426 5427 QualType 5428 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5429 ArrayRef<ObjCProtocolDecl *> protocols) const { 5430 // Look in the folding set for an existing type. 5431 llvm::FoldingSetNodeID ID; 5432 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5433 void *InsertPos = nullptr; 5434 if (ObjCTypeParamType *TypeParam = 5435 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5436 return QualType(TypeParam, 0); 5437 5438 // We canonicalize to the underlying type. 5439 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5440 if (!protocols.empty()) { 5441 // Apply the protocol qualifers. 
5442 bool hasError; 5443 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5444 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5445 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5446 } 5447 5448 unsigned size = sizeof(ObjCTypeParamType); 5449 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5450 void *mem = Allocate(size, alignof(ObjCTypeParamType)); 5451 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5452 5453 Types.push_back(newType); 5454 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5455 return QualType(newType, 0); 5456 } 5457 5458 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5459 ObjCTypeParamDecl *New) const { 5460 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5461 // Update TypeForDecl after updating TypeSourceInfo. 5462 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5463 SmallVector<ObjCProtocolDecl *, 8> protocols; 5464 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5465 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5466 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5467 } 5468 5469 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5470 /// protocol list adopt all protocols in QT's qualified-id protocol 5471 /// list. 5472 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5473 ObjCInterfaceDecl *IC) { 5474 if (!QT->isObjCQualifiedIdType()) 5475 return false; 5476 5477 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5478 // If both the right and left sides have qualifiers. 5479 for (auto *Proto : OPT->quals()) { 5480 if (!IC->ClassImplementsProtocol(Proto, false)) 5481 return false; 5482 } 5483 return true; 5484 } 5485 return false; 5486 } 5487 5488 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5489 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5490 /// of protocols. 5491 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5492 ObjCInterfaceDecl *IDecl) { 5493 if (!QT->isObjCQualifiedIdType()) 5494 return false; 5495 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5496 if (!OPT) 5497 return false; 5498 if (!IDecl->hasDefinition()) 5499 return false; 5500 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5501 CollectInheritedProtocols(IDecl, InheritedProtocols); 5502 if (InheritedProtocols.empty()) 5503 return false; 5504 // Check that if every protocol in list of id<plist> conforms to a protocol 5505 // of IDecl's, then bridge casting is ok. 5506 bool Conforms = false; 5507 for (auto *Proto : OPT->quals()) { 5508 Conforms = false; 5509 for (auto *PI : InheritedProtocols) { 5510 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5511 Conforms = true; 5512 break; 5513 } 5514 } 5515 if (!Conforms) 5516 break; 5517 } 5518 if (Conforms) 5519 return true; 5520 5521 for (auto *PI : InheritedProtocols) { 5522 // If both the right and left sides have qualifiers. 5523 bool Adopts = false; 5524 for (auto *Proto : OPT->quals()) { 5525 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5526 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5527 break; 5528 } 5529 if (!Adopts) 5530 return false; 5531 } 5532 return true; 5533 } 5534 5535 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5536 /// the given object type. 
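/// Illustrative sketch (assumes an ASTContext &Ctx and an
/// ObjCInterfaceDecl *NSStringDecl, neither of which is defined here):
/// \code
///   QualType Obj = Ctx.getObjCInterfaceType(NSStringDecl);
///   QualType Ptr = Ctx.getObjCObjectPointerType(Obj); // the type 'NSString *'
/// \endcode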
5537 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5538 llvm::FoldingSetNodeID ID; 5539 ObjCObjectPointerType::Profile(ID, ObjectT); 5540 5541 void *InsertPos = nullptr; 5542 if (ObjCObjectPointerType *QT = 5543 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5544 return QualType(QT, 0); 5545 5546 // Find the canonical object type. 5547 QualType Canonical; 5548 if (!ObjectT.isCanonical()) { 5549 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5550 5551 // Regenerate InsertPos. 5552 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5553 } 5554 5555 // No match. 5556 void *Mem = 5557 Allocate(sizeof(ObjCObjectPointerType), alignof(ObjCObjectPointerType)); 5558 auto *QType = 5559 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5560 5561 Types.push_back(QType); 5562 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5563 return QualType(QType, 0); 5564 } 5565 5566 /// getObjCInterfaceType - Return the unique reference to the type for the 5567 /// specified ObjC interface decl. The list of protocols is optional. 5568 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5569 ObjCInterfaceDecl *PrevDecl) const { 5570 if (Decl->TypeForDecl) 5571 return QualType(Decl->TypeForDecl, 0); 5572 5573 if (PrevDecl) { 5574 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5575 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5576 return QualType(PrevDecl->TypeForDecl, 0); 5577 } 5578 5579 // Prefer the definition, if there is one. 5580 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5581 Decl = Def; 5582 5583 void *Mem = Allocate(sizeof(ObjCInterfaceType), alignof(ObjCInterfaceType)); 5584 auto *T = new (Mem) ObjCInterfaceType(Decl); 5585 Decl->TypeForDecl = T; 5586 Types.push_back(T); 5587 return QualType(T, 0); 5588 } 5589 5590 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5591 /// TypeOfExprType AST's (since expression's are never shared). For example, 5592 /// multiple declarations that refer to "typeof(x)" all contain different 5593 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5594 /// on canonical type's (which are always unique). 5595 QualType ASTContext::getTypeOfExprType(Expr *tofExpr, TypeOfKind Kind) const { 5596 TypeOfExprType *toe; 5597 if (tofExpr->isTypeDependent()) { 5598 llvm::FoldingSetNodeID ID; 5599 DependentTypeOfExprType::Profile(ID, *this, tofExpr, 5600 Kind == TypeOfKind::Unqualified); 5601 5602 void *InsertPos = nullptr; 5603 DependentTypeOfExprType *Canon = 5604 DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5605 if (Canon) { 5606 // We already have a "canonical" version of an identical, dependent 5607 // typeof(expr) type. Use that as our canonical type. 5608 toe = new (*this, alignof(TypeOfExprType)) 5609 TypeOfExprType(tofExpr, Kind, QualType((TypeOfExprType *)Canon, 0)); 5610 } else { 5611 // Build a new, canonical typeof(expr) type. 5612 Canon = new (*this, alignof(DependentTypeOfExprType)) 5613 DependentTypeOfExprType(tofExpr, Kind); 5614 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5615 toe = Canon; 5616 } 5617 } else { 5618 QualType Canonical = getCanonicalType(tofExpr->getType()); 5619 toe = new (*this, alignof(TypeOfExprType)) 5620 TypeOfExprType(tofExpr, Kind, Canonical); 5621 } 5622 Types.push_back(toe); 5623 return QualType(toe, 0); 5624 } 5625 5626 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5627 /// TypeOfType nodes. 
The only motivation to unique these nodes would be 5628 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5629 /// an issue. This doesn't affect the type checker, since it operates 5630 /// on canonical types (which are always unique). 5631 QualType ASTContext::getTypeOfType(QualType tofType, TypeOfKind Kind) const { 5632 QualType Canonical = getCanonicalType(tofType); 5633 auto *tot = 5634 new (*this, alignof(TypeOfType)) TypeOfType(tofType, Canonical, Kind); 5635 Types.push_back(tot); 5636 return QualType(tot, 0); 5637 } 5638 5639 /// getReferenceQualifiedType - Given an expr, will return the type for 5640 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5641 /// and class member access into account. 5642 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5643 // C++11 [dcl.type.simple]p4: 5644 // [...] 5645 QualType T = E->getType(); 5646 switch (E->getValueKind()) { 5647 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5648 // type of e; 5649 case VK_XValue: 5650 return getRValueReferenceType(T); 5651 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5652 // type of e; 5653 case VK_LValue: 5654 return getLValueReferenceType(T); 5655 // - otherwise, decltype(e) is the type of e. 5656 case VK_PRValue: 5657 return T; 5658 } 5659 llvm_unreachable("Unknown value kind"); 5660 } 5661 5662 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5663 /// nodes. This would never be helpful, since each such type has its own 5664 /// expression, and would not give a significant memory saving, since there 5665 /// is an Expr tree under each such type. 5666 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5667 DecltypeType *dt; 5668 5669 // C++11 [temp.type]p2: 5670 // If an expression e involves a template parameter, decltype(e) denotes a 5671 // unique dependent type. Two such decltype-specifiers refer to the same 5672 // type only if their expressions are equivalent (14.5.6.1). 5673 if (e->isInstantiationDependent()) { 5674 llvm::FoldingSetNodeID ID; 5675 DependentDecltypeType::Profile(ID, *this, e); 5676 5677 void *InsertPos = nullptr; 5678 DependentDecltypeType *Canon 5679 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5680 if (!Canon) { 5681 // Build a new, canonical decltype(expr) type. 5682 Canon = new (*this, alignof(DependentDecltypeType)) 5683 DependentDecltypeType(e, DependentTy); 5684 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5685 } 5686 dt = new (*this, alignof(DecltypeType)) 5687 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5688 } else { 5689 dt = new (*this, alignof(DecltypeType)) 5690 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5691 } 5692 Types.push_back(dt); 5693 return QualType(dt, 0); 5694 } 5695 5696 /// getUnaryTransformationType - We don't unique these, since the memory 5697 /// savings are minimal and these are rare. 5698 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5699 QualType UnderlyingType, 5700 UnaryTransformType::UTTKind Kind) 5701 const { 5702 UnaryTransformType *ut = nullptr; 5703 5704 if (BaseType->isDependentType()) { 5705 // Look in the folding set for an existing type. 
5706 llvm::FoldingSetNodeID ID; 5707 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5708 5709 void *InsertPos = nullptr; 5710 DependentUnaryTransformType *Canon 5711 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5712 5713 if (!Canon) { 5714 // Build a new, canonical __underlying_type(type) type. 5715 Canon = new (*this, alignof(DependentUnaryTransformType)) 5716 DependentUnaryTransformType(*this, getCanonicalType(BaseType), Kind); 5717 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5718 } 5719 ut = new (*this, alignof(UnaryTransformType)) 5720 UnaryTransformType(BaseType, QualType(), Kind, QualType(Canon, 0)); 5721 } else { 5722 QualType CanonType = getCanonicalType(UnderlyingType); 5723 ut = new (*this, alignof(UnaryTransformType)) 5724 UnaryTransformType(BaseType, UnderlyingType, Kind, CanonType); 5725 } 5726 Types.push_back(ut); 5727 return QualType(ut, 0); 5728 } 5729 5730 QualType ASTContext::getAutoTypeInternal( 5731 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5732 bool IsPack, ConceptDecl *TypeConstraintConcept, 5733 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5734 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5735 !TypeConstraintConcept && !IsDependent) 5736 return getAutoDeductType(); 5737 5738 // Look in the folding set for an existing type. 5739 void *InsertPos = nullptr; 5740 llvm::FoldingSetNodeID ID; 5741 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5742 TypeConstraintConcept, TypeConstraintArgs); 5743 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5744 return QualType(AT, 0); 5745 5746 QualType Canon; 5747 if (!IsCanon) { 5748 if (!DeducedType.isNull()) { 5749 Canon = DeducedType.getCanonicalType(); 5750 } else if (TypeConstraintConcept) { 5751 bool AnyNonCanonArgs = false; 5752 ConceptDecl *CanonicalConcept = TypeConstraintConcept->getCanonicalDecl(); 5753 auto CanonicalConceptArgs = ::getCanonicalTemplateArguments( 5754 *this, TypeConstraintArgs, AnyNonCanonArgs); 5755 if (CanonicalConcept != TypeConstraintConcept || AnyNonCanonArgs) { 5756 Canon = 5757 getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5758 CanonicalConcept, CanonicalConceptArgs, true); 5759 // Find the insert position again. 5760 [[maybe_unused]] auto *Nothing = 5761 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5762 assert(!Nothing && "canonical type broken"); 5763 } 5764 } 5765 } 5766 5767 void *Mem = Allocate(sizeof(AutoType) + 5768 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5769 alignof(AutoType)); 5770 auto *AT = new (Mem) AutoType( 5771 DeducedType, Keyword, 5772 (IsDependent ? TypeDependence::DependentInstantiation 5773 : TypeDependence::None) | 5774 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5775 Canon, TypeConstraintConcept, TypeConstraintArgs); 5776 Types.push_back(AT); 5777 AutoTypes.InsertNode(AT, InsertPos); 5778 return QualType(AT, 0); 5779 } 5780 5781 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5782 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5783 /// canonical deduced-but-dependent 'auto' type. 
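/// A hedged usage sketch (Ctx is an assumed ASTContext &; passing a null
/// DeducedType instead would yield the canonical undeduced 'auto'):
/// \code
///   // 'auto' deduced to 'int', as for 'auto x = 0;'.
///   QualType Deduced = Ctx.getAutoType(Ctx.IntTy, AutoTypeKeyword::Auto,
///                                      /*IsDependent=*/false, /*IsPack=*/false,
///                                      /*TypeConstraintConcept=*/nullptr,
///                                      /*TypeConstraintArgs=*/{});
/// \endcode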
5784 QualType 5785 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5786 bool IsDependent, bool IsPack, 5787 ConceptDecl *TypeConstraintConcept, 5788 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5789 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5790 assert((!IsDependent || DeducedType.isNull()) && 5791 "A dependent auto should be undeduced"); 5792 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5793 TypeConstraintConcept, TypeConstraintArgs); 5794 } 5795 5796 QualType ASTContext::getUnconstrainedType(QualType T) const { 5797 QualType CanonT = T.getCanonicalType(); 5798 5799 // Remove a type-constraint from a top-level auto or decltype(auto). 5800 if (auto *AT = CanonT->getAs<AutoType>()) { 5801 if (!AT->isConstrained()) 5802 return T; 5803 return getQualifiedType(getAutoType(QualType(), AT->getKeyword(), false, 5804 AT->containsUnexpandedParameterPack()), 5805 T.getQualifiers()); 5806 } 5807 5808 // FIXME: We only support constrained auto at the top level in the type of a 5809 // non-type template parameter at the moment. Once we lift that restriction, 5810 // we'll need to recursively build types containing auto here. 5811 assert(!CanonT->getContainedAutoType() || 5812 !CanonT->getContainedAutoType()->isConstrained()); 5813 return T; 5814 } 5815 5816 /// Return the uniqued reference to the deduced template specialization type 5817 /// which has been deduced to the given type, or to the canonical undeduced 5818 /// such type, or the canonical deduced-but-dependent such type. 5819 QualType ASTContext::getDeducedTemplateSpecializationType( 5820 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5821 // Look in the folding set for an existing type. 5822 void *InsertPos = nullptr; 5823 llvm::FoldingSetNodeID ID; 5824 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5825 IsDependent); 5826 if (DeducedTemplateSpecializationType *DTST = 5827 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5828 return QualType(DTST, 0); 5829 5830 auto *DTST = new (*this, alignof(DeducedTemplateSpecializationType)) 5831 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5832 llvm::FoldingSetNodeID TempID; 5833 DTST->Profile(TempID); 5834 assert(ID == TempID && "ID does not match"); 5835 Types.push_back(DTST); 5836 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5837 return QualType(DTST, 0); 5838 } 5839 5840 /// getAtomicType - Return the uniqued reference to the atomic type for 5841 /// the given value type. 5842 QualType ASTContext::getAtomicType(QualType T) const { 5843 // Unique pointers, to guarantee there is only one pointer of a particular 5844 // structure. 5845 llvm::FoldingSetNodeID ID; 5846 AtomicType::Profile(ID, T); 5847 5848 void *InsertPos = nullptr; 5849 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5850 return QualType(AT, 0); 5851 5852 // If the atomic value type isn't canonical, this won't be a canonical type 5853 // either, so fill in the canonical type field. 5854 QualType Canonical; 5855 if (!T.isCanonical()) { 5856 Canonical = getAtomicType(getCanonicalType(T)); 5857 5858 // Get the new insert position for the node we care about. 
5859 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5860 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5861 } 5862 auto *New = new (*this, alignof(AtomicType)) AtomicType(T, Canonical); 5863 Types.push_back(New); 5864 AtomicTypes.InsertNode(New, InsertPos); 5865 return QualType(New, 0); 5866 } 5867 5868 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5869 QualType ASTContext::getAutoDeductType() const { 5870 if (AutoDeductTy.isNull()) 5871 AutoDeductTy = QualType(new (*this, alignof(AutoType)) 5872 AutoType(QualType(), AutoTypeKeyword::Auto, 5873 TypeDependence::None, QualType(), 5874 /*concept*/ nullptr, /*args*/ {}), 5875 0); 5876 return AutoDeductTy; 5877 } 5878 5879 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5880 QualType ASTContext::getAutoRRefDeductType() const { 5881 if (AutoRRefDeductTy.isNull()) 5882 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5883 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5884 return AutoRRefDeductTy; 5885 } 5886 5887 /// getTagDeclType - Return the unique reference to the type for the 5888 /// specified TagDecl (struct/union/class/enum) decl. 5889 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5890 assert(Decl); 5891 // FIXME: What is the design on getTagDeclType when it requires casting 5892 // away const? mutable? 5893 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5894 } 5895 5896 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5897 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5898 /// needs to agree with the definition in <stddef.h>. 5899 CanQualType ASTContext::getSizeType() const { 5900 return getFromTargetType(Target->getSizeType()); 5901 } 5902 5903 /// Return the unique signed counterpart of the integer type 5904 /// corresponding to size_t. 5905 CanQualType ASTContext::getSignedSizeType() const { 5906 return getFromTargetType(Target->getSignedSizeType()); 5907 } 5908 5909 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5910 CanQualType ASTContext::getIntMaxType() const { 5911 return getFromTargetType(Target->getIntMaxType()); 5912 } 5913 5914 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5915 CanQualType ASTContext::getUIntMaxType() const { 5916 return getFromTargetType(Target->getUIntMaxType()); 5917 } 5918 5919 /// getSignedWCharType - Return the type of "signed wchar_t". 5920 /// Used when in C++, as a GCC extension. 5921 QualType ASTContext::getSignedWCharType() const { 5922 // FIXME: derive from "Target" ? 5923 return WCharTy; 5924 } 5925 5926 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5927 /// Used when in C++, as a GCC extension. 5928 QualType ASTContext::getUnsignedWCharType() const { 5929 // FIXME: derive from "Target" ? 5930 return UnsignedIntTy; 5931 } 5932 5933 QualType ASTContext::getIntPtrType() const { 5934 return getFromTargetType(Target->getIntPtrType()); 5935 } 5936 5937 QualType ASTContext::getUIntPtrType() const { 5938 return getCorrespondingUnsignedType(getIntPtrType()); 5939 } 5940 5941 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5942 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 
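/// For example, on a typical LP64 target this is 'long'. A hedged sketch of
/// how a caller might query it (Ctx is an assumed ASTContext &):
/// \code
///   QualType PtrDiff = Ctx.getPointerDiffType();
///   uint64_t Bits = Ctx.getTypeSize(PtrDiff); // commonly 64 on LP64 targets
/// \endcode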
5943 QualType ASTContext::getPointerDiffType() const { 5944 return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); 5945 } 5946 5947 /// Return the unique unsigned counterpart of "ptrdiff_t" 5948 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5949 /// in the definition of %tu format specifier. 5950 QualType ASTContext::getUnsignedPointerDiffType() const { 5951 return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); 5952 } 5953 5954 /// Return the unique type for "pid_t" defined in 5955 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5956 QualType ASTContext::getProcessIDType() const { 5957 return getFromTargetType(Target->getProcessIDType()); 5958 } 5959 5960 //===----------------------------------------------------------------------===// 5961 // Type Operators 5962 //===----------------------------------------------------------------------===// 5963 5964 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5965 // Push qualifiers into arrays, and then discard any remaining 5966 // qualifiers. 5967 T = getCanonicalType(T); 5968 T = getVariableArrayDecayedType(T); 5969 const Type *Ty = T.getTypePtr(); 5970 QualType Result; 5971 if (isa<ArrayType>(Ty)) { 5972 Result = getArrayDecayedType(QualType(Ty,0)); 5973 } else if (isa<FunctionType>(Ty)) { 5974 Result = getPointerType(QualType(Ty, 0)); 5975 } else { 5976 Result = QualType(Ty, 0); 5977 } 5978 5979 return CanQualType::CreateUnsafe(Result); 5980 } 5981 5982 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5983 Qualifiers &quals) { 5984 SplitQualType splitType = type.getSplitUnqualifiedType(); 5985 5986 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5987 // the unqualified desugared type and then drops it on the floor. 5988 // We then have to strip that sugar back off with 5989 // getUnqualifiedDesugaredType(), which is silly. 5990 const auto *AT = 5991 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5992 5993 // If we don't have an array, just use the results in splitType. 5994 if (!AT) { 5995 quals = splitType.Quals; 5996 return QualType(splitType.Ty, 0); 5997 } 5998 5999 // Otherwise, recurse on the array's element type. 6000 QualType elementType = AT->getElementType(); 6001 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 6002 6003 // If that didn't change the element type, AT has no qualifiers, so we 6004 // can just use the results in splitType. 6005 if (elementType == unqualElementType) { 6006 assert(quals.empty()); // from the recursive call 6007 quals = splitType.Quals; 6008 return QualType(splitType.Ty, 0); 6009 } 6010 6011 // Otherwise, add in the qualifiers from the outermost type, then 6012 // build the type back up. 
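  // For example (illustrative): given 'const int[3]', the recursive call above
  // strips 'const' from the element type and records it in 'quals'; the code
  // below then rebuilds the array so the caller sees 'int[3]' plus 'const'.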
6013 quals.addConsistentQualifiers(splitType.Quals); 6014 6015 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 6016 return getConstantArrayType(unqualElementType, CAT->getSize(), 6017 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 6018 } 6019 6020 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 6021 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 6022 } 6023 6024 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 6025 return getVariableArrayType(unqualElementType, 6026 VAT->getSizeExpr(), 6027 VAT->getSizeModifier(), 6028 VAT->getIndexTypeCVRQualifiers(), 6029 VAT->getBracketsRange()); 6030 } 6031 6032 const auto *DSAT = cast<DependentSizedArrayType>(AT); 6033 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 6034 DSAT->getSizeModifier(), 0, 6035 SourceRange()); 6036 } 6037 6038 /// Attempt to unwrap two types that may both be array types with the same bound 6039 /// (or both be array types of unknown bound) for the purpose of comparing the 6040 /// cv-decomposition of two types per C++ [conv.qual]. 6041 /// 6042 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6043 /// C++20 [conv.qual], if permitted by the current language mode. 6044 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 6045 bool AllowPiMismatch) { 6046 while (true) { 6047 auto *AT1 = getAsArrayType(T1); 6048 if (!AT1) 6049 return; 6050 6051 auto *AT2 = getAsArrayType(T2); 6052 if (!AT2) 6053 return; 6054 6055 // If we don't have two array types with the same constant bound nor two 6056 // incomplete array types, we've unwrapped everything we can. 6057 // C++20 also permits one type to be a constant array type and the other 6058 // to be an incomplete array type. 6059 // FIXME: Consider also unwrapping array of unknown bound and VLA. 6060 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 6061 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 6062 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 6063 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6064 isa<IncompleteArrayType>(AT2)))) 6065 return; 6066 } else if (isa<IncompleteArrayType>(AT1)) { 6067 if (!(isa<IncompleteArrayType>(AT2) || 6068 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 6069 isa<ConstantArrayType>(AT2)))) 6070 return; 6071 } else { 6072 return; 6073 } 6074 6075 T1 = AT1->getElementType(); 6076 T2 = AT2->getElementType(); 6077 } 6078 } 6079 6080 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 6081 /// 6082 /// If T1 and T2 are both pointer types of the same kind, or both array types 6083 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 6084 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 6085 /// 6086 /// This function will typically be called in a loop that successively 6087 /// "unwraps" pointer and pointer-to-member types to compare them at each 6088 /// level. 6089 /// 6090 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 6091 /// C++20 [conv.qual], if permitted by the current language mode. 6092 /// 6093 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 6094 /// pair of types that can't be unwrapped further. 
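/// Illustrative walk-through (hypothetical T1/T2, not code from this file):
/// \code
///   // Suppose T1 is 'const int **' and T2 is 'int *const *'.
///   Ctx.UnwrapSimilarTypes(T1, T2); // true:  now 'const int *' and 'int *const'
///   Ctx.UnwrapSimilarTypes(T1, T2); // true:  now 'const int' and 'int'
///   Ctx.UnwrapSimilarTypes(T1, T2); // false: no pointer level remains
/// \endcode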
6095 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 6096 bool AllowPiMismatch) { 6097 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 6098 6099 const auto *T1PtrType = T1->getAs<PointerType>(); 6100 const auto *T2PtrType = T2->getAs<PointerType>(); 6101 if (T1PtrType && T2PtrType) { 6102 T1 = T1PtrType->getPointeeType(); 6103 T2 = T2PtrType->getPointeeType(); 6104 return true; 6105 } 6106 6107 const auto *T1MPType = T1->getAs<MemberPointerType>(); 6108 const auto *T2MPType = T2->getAs<MemberPointerType>(); 6109 if (T1MPType && T2MPType && 6110 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 6111 QualType(T2MPType->getClass(), 0))) { 6112 T1 = T1MPType->getPointeeType(); 6113 T2 = T2MPType->getPointeeType(); 6114 return true; 6115 } 6116 6117 if (getLangOpts().ObjC) { 6118 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6119 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6120 if (T1OPType && T2OPType) { 6121 T1 = T1OPType->getPointeeType(); 6122 T2 = T2OPType->getPointeeType(); 6123 return true; 6124 } 6125 } 6126 6127 // FIXME: Block pointers, too? 6128 6129 return false; 6130 } 6131 6132 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6133 while (true) { 6134 Qualifiers Quals; 6135 T1 = getUnqualifiedArrayType(T1, Quals); 6136 T2 = getUnqualifiedArrayType(T2, Quals); 6137 if (hasSameType(T1, T2)) 6138 return true; 6139 if (!UnwrapSimilarTypes(T1, T2)) 6140 return false; 6141 } 6142 } 6143 6144 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6145 while (true) { 6146 Qualifiers Quals1, Quals2; 6147 T1 = getUnqualifiedArrayType(T1, Quals1); 6148 T2 = getUnqualifiedArrayType(T2, Quals2); 6149 6150 Quals1.removeCVRQualifiers(); 6151 Quals2.removeCVRQualifiers(); 6152 if (Quals1 != Quals2) 6153 return false; 6154 6155 if (hasSameType(T1, T2)) 6156 return true; 6157 6158 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6159 return false; 6160 } 6161 } 6162 6163 DeclarationNameInfo 6164 ASTContext::getNameForTemplate(TemplateName Name, 6165 SourceLocation NameLoc) const { 6166 switch (Name.getKind()) { 6167 case TemplateName::QualifiedTemplate: 6168 case TemplateName::Template: 6169 // DNInfo work in progress: CHECKME: what about DNLoc? 6170 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6171 NameLoc); 6172 6173 case TemplateName::OverloadedTemplate: { 6174 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6175 // DNInfo work in progress: CHECKME: what about DNLoc? 6176 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6177 } 6178 6179 case TemplateName::AssumedTemplate: { 6180 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6181 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6182 } 6183 6184 case TemplateName::DependentTemplate: { 6185 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6186 DeclarationName DName; 6187 if (DTN->isIdentifier()) { 6188 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6189 return DeclarationNameInfo(DName, NameLoc); 6190 } else { 6191 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6192 // DNInfo work in progress: FIXME: source locations? 
6193 DeclarationNameLoc DNLoc = 6194 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6195 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6196 } 6197 } 6198 6199 case TemplateName::SubstTemplateTemplateParm: { 6200 SubstTemplateTemplateParmStorage *subst 6201 = Name.getAsSubstTemplateTemplateParm(); 6202 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6203 NameLoc); 6204 } 6205 6206 case TemplateName::SubstTemplateTemplateParmPack: { 6207 SubstTemplateTemplateParmPackStorage *subst 6208 = Name.getAsSubstTemplateTemplateParmPack(); 6209 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6210 NameLoc); 6211 } 6212 case TemplateName::UsingTemplate: 6213 return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(), 6214 NameLoc); 6215 } 6216 6217 llvm_unreachable("bad template name kind!"); 6218 } 6219 6220 TemplateName 6221 ASTContext::getCanonicalTemplateName(const TemplateName &Name) const { 6222 switch (Name.getKind()) { 6223 case TemplateName::UsingTemplate: 6224 case TemplateName::QualifiedTemplate: 6225 case TemplateName::Template: { 6226 TemplateDecl *Template = Name.getAsTemplateDecl(); 6227 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6228 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6229 6230 // The canonical template name is the canonical template declaration. 6231 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6232 } 6233 6234 case TemplateName::OverloadedTemplate: 6235 case TemplateName::AssumedTemplate: 6236 llvm_unreachable("cannot canonicalize unresolved template"); 6237 6238 case TemplateName::DependentTemplate: { 6239 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6240 assert(DTN && "Non-dependent template names must refer to template decls."); 6241 return DTN->CanonicalTemplateName; 6242 } 6243 6244 case TemplateName::SubstTemplateTemplateParm: { 6245 SubstTemplateTemplateParmStorage *subst 6246 = Name.getAsSubstTemplateTemplateParm(); 6247 return getCanonicalTemplateName(subst->getReplacement()); 6248 } 6249 6250 case TemplateName::SubstTemplateTemplateParmPack: { 6251 SubstTemplateTemplateParmPackStorage *subst = 6252 Name.getAsSubstTemplateTemplateParmPack(); 6253 TemplateArgument canonArgPack = 6254 getCanonicalTemplateArgument(subst->getArgumentPack()); 6255 return getSubstTemplateTemplateParmPack( 6256 canonArgPack, subst->getAssociatedDecl()->getCanonicalDecl(), 6257 subst->getFinal(), subst->getIndex()); 6258 } 6259 } 6260 6261 llvm_unreachable("bad template name!"); 6262 } 6263 6264 bool ASTContext::hasSameTemplateName(const TemplateName &X, 6265 const TemplateName &Y) const { 6266 return getCanonicalTemplateName(X).getAsVoidPointer() == 6267 getCanonicalTemplateName(Y).getAsVoidPointer(); 6268 } 6269 6270 bool ASTContext::isSameConstraintExpr(const Expr *XCE, const Expr *YCE) const { 6271 if (!XCE != !YCE) 6272 return false; 6273 6274 if (!XCE) 6275 return true; 6276 6277 llvm::FoldingSetNodeID XCEID, YCEID; 6278 XCE->Profile(XCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6279 YCE->Profile(YCEID, *this, /*Canonical=*/true, /*ProfileLambdaExpr=*/true); 6280 return XCEID == YCEID; 6281 } 6282 6283 bool ASTContext::isSameTypeConstraint(const TypeConstraint *XTC, 6284 const TypeConstraint *YTC) const { 6285 if (!XTC != !YTC) 6286 return false; 6287 6288 if (!XTC) 6289 return true; 6290 6291 auto *NCX = XTC->getNamedConcept(); 6292 auto *NCY = YTC->getNamedConcept(); 6293 if (!NCX || !NCY || !isSameEntity(NCX, NCY)) 6294 return 
false; 6295 if (XTC->getConceptReference()->hasExplicitTemplateArgs() != 6296 YTC->getConceptReference()->hasExplicitTemplateArgs()) 6297 return false; 6298 if (XTC->getConceptReference()->hasExplicitTemplateArgs()) 6299 if (XTC->getConceptReference() 6300 ->getTemplateArgsAsWritten() 6301 ->NumTemplateArgs != 6302 YTC->getConceptReference()->getTemplateArgsAsWritten()->NumTemplateArgs) 6303 return false; 6304 6305 // Compare slowly by profiling. 6306 // 6307 // We can't compare the profiling results for the template 6308 // args here. Consider the following example in different modules: 6309 // 6310 // template <__integer_like _Tp, C<_Tp> Sentinel> 6311 // constexpr _Tp operator()(_Tp &&__t, Sentinel &&last) const { 6312 // return __t; 6313 // } 6314 // 6315 // When we compare the profiling results for `C<_Tp>` in different 6316 // modules, we end up comparing the type of `_Tp` in different modules. 6317 // However, the type of `_Tp` naturally refers to a different type in 6318 // each module, so we can't compare the profiling results for the 6319 // template args directly. 6320 return isSameConstraintExpr(XTC->getImmediatelyDeclaredConstraint(), 6321 YTC->getImmediatelyDeclaredConstraint()); 6322 } 6323 6324 bool ASTContext::isSameTemplateParameter(const NamedDecl *X, 6325 const NamedDecl *Y) const { 6326 if (X->getKind() != Y->getKind()) 6327 return false; 6328 6329 if (auto *TX = dyn_cast<TemplateTypeParmDecl>(X)) { 6330 auto *TY = cast<TemplateTypeParmDecl>(Y); 6331 if (TX->isParameterPack() != TY->isParameterPack()) 6332 return false; 6333 if (TX->hasTypeConstraint() != TY->hasTypeConstraint()) 6334 return false; 6335 return isSameTypeConstraint(TX->getTypeConstraint(), 6336 TY->getTypeConstraint()); 6337 } 6338 6339 if (auto *TX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6340 auto *TY = cast<NonTypeTemplateParmDecl>(Y); 6341 return TX->isParameterPack() == TY->isParameterPack() && 6342 TX->getASTContext().hasSameType(TX->getType(), TY->getType()) && 6343 isSameConstraintExpr(TX->getPlaceholderTypeConstraint(), 6344 TY->getPlaceholderTypeConstraint()); 6345 } 6346 6347 auto *TX = cast<TemplateTemplateParmDecl>(X); 6348 auto *TY = cast<TemplateTemplateParmDecl>(Y); 6349 return TX->isParameterPack() == TY->isParameterPack() && 6350 isSameTemplateParameterList(TX->getTemplateParameters(), 6351 TY->getTemplateParameters()); 6352 } 6353 6354 bool ASTContext::isSameTemplateParameterList( 6355 const TemplateParameterList *X, const TemplateParameterList *Y) const { 6356 if (X->size() != Y->size()) 6357 return false; 6358 6359 for (unsigned I = 0, N = X->size(); I != N; ++I) 6360 if (!isSameTemplateParameter(X->getParam(I), Y->getParam(I))) 6361 return false; 6362 6363 return isSameConstraintExpr(X->getRequiresClause(), Y->getRequiresClause()); 6364 } 6365 6366 bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X, 6367 const NamedDecl *Y) const { 6368 // If the type parameter isn't the same already, we don't need to check the 6369 // default argument further.
6370 if (!isSameTemplateParameter(X, Y)) 6371 return false; 6372 6373 if (auto *TTPX = dyn_cast<TemplateTypeParmDecl>(X)) { 6374 auto *TTPY = cast<TemplateTypeParmDecl>(Y); 6375 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6376 return false; 6377 6378 return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument()); 6379 } 6380 6381 if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) { 6382 auto *NTTPY = cast<NonTypeTemplateParmDecl>(Y); 6383 if (!NTTPX->hasDefaultArgument() || !NTTPY->hasDefaultArgument()) 6384 return false; 6385 6386 Expr *DefaultArgumentX = NTTPX->getDefaultArgument()->IgnoreImpCasts(); 6387 Expr *DefaultArgumentY = NTTPY->getDefaultArgument()->IgnoreImpCasts(); 6388 llvm::FoldingSetNodeID XID, YID; 6389 DefaultArgumentX->Profile(XID, *this, /*Canonical=*/true); 6390 DefaultArgumentY->Profile(YID, *this, /*Canonical=*/true); 6391 return XID == YID; 6392 } 6393 6394 auto *TTPX = cast<TemplateTemplateParmDecl>(X); 6395 auto *TTPY = cast<TemplateTemplateParmDecl>(Y); 6396 6397 if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument()) 6398 return false; 6399 6400 const TemplateArgument &TAX = TTPX->getDefaultArgument().getArgument(); 6401 const TemplateArgument &TAY = TTPY->getDefaultArgument().getArgument(); 6402 return hasSameTemplateName(TAX.getAsTemplate(), TAY.getAsTemplate()); 6403 } 6404 6405 static NamespaceDecl *getNamespace(const NestedNameSpecifier *X) { 6406 if (auto *NS = X->getAsNamespace()) 6407 return NS; 6408 if (auto *NAS = X->getAsNamespaceAlias()) 6409 return NAS->getNamespace(); 6410 return nullptr; 6411 } 6412 6413 static bool isSameQualifier(const NestedNameSpecifier *X, 6414 const NestedNameSpecifier *Y) { 6415 if (auto *NSX = getNamespace(X)) { 6416 auto *NSY = getNamespace(Y); 6417 if (!NSY || NSX->getCanonicalDecl() != NSY->getCanonicalDecl()) 6418 return false; 6419 } else if (X->getKind() != Y->getKind()) 6420 return false; 6421 6422 // FIXME: For namespaces and types, we're permitted to check that the entity 6423 // is named via the same tokens. We should probably do so. 6424 switch (X->getKind()) { 6425 case NestedNameSpecifier::Identifier: 6426 if (X->getAsIdentifier() != Y->getAsIdentifier()) 6427 return false; 6428 break; 6429 case NestedNameSpecifier::Namespace: 6430 case NestedNameSpecifier::NamespaceAlias: 6431 // We've already checked that we named the same namespace. 6432 break; 6433 case NestedNameSpecifier::TypeSpec: 6434 case NestedNameSpecifier::TypeSpecWithTemplate: 6435 if (X->getAsType()->getCanonicalTypeInternal() != 6436 Y->getAsType()->getCanonicalTypeInternal()) 6437 return false; 6438 break; 6439 case NestedNameSpecifier::Global: 6440 case NestedNameSpecifier::Super: 6441 return true; 6442 } 6443 6444 // Recurse into earlier portion of NNS, if any. 6445 auto *PX = X->getPrefix(); 6446 auto *PY = Y->getPrefix(); 6447 if (PX && PY) 6448 return isSameQualifier(PX, PY); 6449 return !PX && !PY; 6450 } 6451 6452 /// Determine whether the attributes we can overload on are identical for A and 6453 /// B. Will ignore any overloadable attrs represented in the type of A and B. 6454 static bool hasSameOverloadableAttrs(const FunctionDecl *A, 6455 const FunctionDecl *B) { 6456 // Note that pass_object_size attributes are represented in the function's 6457 // ExtParameterInfo, so we don't need to check them here. 
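// As a concrete sketch of what is compared below (using the GNU attribute
// spelling; the declarations are illustrative only):
//
//   void f(int n) __attribute__((enable_if(n > 0, "")));  // A
//   void f(int n) __attribute__((enable_if(n > 0, "")));  // B
//   void f(int n) __attribute__((enable_if(n < 0, "")));  // C
//
// A and B carry identical enable_if conditions and compare equal; A and C
// differ and do not.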
6458 6459 llvm::FoldingSetNodeID Cand1ID, Cand2ID; 6460 auto AEnableIfAttrs = A->specific_attrs<EnableIfAttr>(); 6461 auto BEnableIfAttrs = B->specific_attrs<EnableIfAttr>(); 6462 6463 for (auto Pair : zip_longest(AEnableIfAttrs, BEnableIfAttrs)) { 6464 std::optional<EnableIfAttr *> Cand1A = std::get<0>(Pair); 6465 std::optional<EnableIfAttr *> Cand2A = std::get<1>(Pair); 6466 6467 // Return false if the number of enable_if attributes is different. 6468 if (!Cand1A || !Cand2A) 6469 return false; 6470 6471 Cand1ID.clear(); 6472 Cand2ID.clear(); 6473 6474 (*Cand1A)->getCond()->Profile(Cand1ID, A->getASTContext(), true); 6475 (*Cand2A)->getCond()->Profile(Cand2ID, B->getASTContext(), true); 6476 6477 // Return false if any of the enable_if expressions of A and B are 6478 // different. 6479 if (Cand1ID != Cand2ID) 6480 return false; 6481 } 6482 return true; 6483 } 6484 6485 bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) const { 6486 // Caution: this function is called by the AST reader during deserialization, 6487 // so it cannot rely on AST invariants being met. Non-trivial accessors 6488 // should be avoided, along with any traversal of redeclaration chains. 6489 6490 if (X == Y) 6491 return true; 6492 6493 if (X->getDeclName() != Y->getDeclName()) 6494 return false; 6495 6496 // Must be in the same context. 6497 // 6498 // Note that we can't use DeclContext::Equals here, because the DeclContexts 6499 // could be two different declarations of the same function. (We will fix the 6500 // semantic DC to refer to the primary definition after merging.) 6501 if (!declaresSameEntity(cast<Decl>(X->getDeclContext()->getRedeclContext()), 6502 cast<Decl>(Y->getDeclContext()->getRedeclContext()))) 6503 return false; 6504 6505 // Two typedefs refer to the same entity if they have the same underlying 6506 // type. 6507 if (const auto *TypedefX = dyn_cast<TypedefNameDecl>(X)) 6508 if (const auto *TypedefY = dyn_cast<TypedefNameDecl>(Y)) 6509 return hasSameType(TypedefX->getUnderlyingType(), 6510 TypedefY->getUnderlyingType()); 6511 6512 // Must have the same kind. 6513 if (X->getKind() != Y->getKind()) 6514 return false; 6515 6516 // Objective-C classes and protocols with the same name always match. 6517 if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X)) 6518 return true; 6519 6520 if (isa<ClassTemplateSpecializationDecl>(X)) { 6521 // No need to handle these here: we merge them when adding them to the 6522 // template. 6523 return false; 6524 } 6525 6526 // Compatible tags match. 6527 if (const auto *TagX = dyn_cast<TagDecl>(X)) { 6528 const auto *TagY = cast<TagDecl>(Y); 6529 return (TagX->getTagKind() == TagY->getTagKind()) || 6530 ((TagX->getTagKind() == TagTypeKind::Struct || 6531 TagX->getTagKind() == TagTypeKind::Class || 6532 TagX->getTagKind() == TagTypeKind::Interface) && 6533 (TagY->getTagKind() == TagTypeKind::Struct || 6534 TagY->getTagKind() == TagTypeKind::Class || 6535 TagY->getTagKind() == TagTypeKind::Interface)); 6536 } 6537 6538 // Functions with the same type and linkage match. 6539 // FIXME: This needs to cope with merging of prototyped/non-prototyped 6540 // functions, etc. 
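// (Sketch: two modules that each declare `void f(int);` at namespace scope
// produce FunctionDecls that pass the checks below -- same written type,
// same external linkage, same overloadable attributes -- whereas declaring
// one of them `static` would already fail on linkage.)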
6541 if (const auto *FuncX = dyn_cast<FunctionDecl>(X)) { 6542 const auto *FuncY = cast<FunctionDecl>(Y); 6543 if (const auto *CtorX = dyn_cast<CXXConstructorDecl>(X)) { 6544 const auto *CtorY = cast<CXXConstructorDecl>(Y); 6545 if (CtorX->getInheritedConstructor() && 6546 !isSameEntity(CtorX->getInheritedConstructor().getConstructor(), 6547 CtorY->getInheritedConstructor().getConstructor())) 6548 return false; 6549 } 6550 6551 if (FuncX->isMultiVersion() != FuncY->isMultiVersion()) 6552 return false; 6553 6554 // Multiversioned functions with different feature strings are represented 6555 // as separate declarations. 6556 if (FuncX->isMultiVersion()) { 6557 const auto *TAX = FuncX->getAttr<TargetAttr>(); 6558 const auto *TAY = FuncY->getAttr<TargetAttr>(); 6559 assert(TAX && TAY && "Multiversion Function without target attribute"); 6560 6561 if (TAX->getFeaturesStr() != TAY->getFeaturesStr()) 6562 return false; 6563 } 6564 6565 // Per C++20 [temp.over.link]/4, friends in different classes are sometimes 6566 // not the same entity if they are constrained. 6567 if ((FuncX->isMemberLikeConstrainedFriend() || 6568 FuncY->isMemberLikeConstrainedFriend()) && 6569 !FuncX->getLexicalDeclContext()->Equals( 6570 FuncY->getLexicalDeclContext())) { 6571 return false; 6572 } 6573 6574 if (!isSameConstraintExpr(FuncX->getTrailingRequiresClause(), 6575 FuncY->getTrailingRequiresClause())) 6576 return false; 6577 6578 auto GetTypeAsWritten = [](const FunctionDecl *FD) { 6579 // Map to the first declaration that we've already merged into this one. 6580 // The TSI of redeclarations might not match (due to calling conventions 6581 // being inherited onto the type but not the TSI), but the TSI type of 6582 // the first declaration of the function should match across modules. 6583 FD = FD->getCanonicalDecl(); 6584 return FD->getTypeSourceInfo() ? FD->getTypeSourceInfo()->getType() 6585 : FD->getType(); 6586 }; 6587 QualType XT = GetTypeAsWritten(FuncX), YT = GetTypeAsWritten(FuncY); 6588 if (!hasSameType(XT, YT)) { 6589 // We can get functions with different types on the redecl chain in C++17 6590 // if they have differing exception specifications and at least one of 6591 // the exception specs is unresolved. 6592 auto *XFPT = XT->getAs<FunctionProtoType>(); 6593 auto *YFPT = YT->getAs<FunctionProtoType>(); 6594 if (getLangOpts().CPlusPlus17 && XFPT && YFPT && 6595 (isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) || 6596 isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) && 6597 hasSameFunctionTypeIgnoringExceptionSpec(XT, YT)) 6598 return true; 6599 return false; 6600 } 6601 6602 return FuncX->getLinkageInternal() == FuncY->getLinkageInternal() && 6603 hasSameOverloadableAttrs(FuncX, FuncY); 6604 } 6605 6606 // Variables with the same type and linkage match. 6607 if (const auto *VarX = dyn_cast<VarDecl>(X)) { 6608 const auto *VarY = cast<VarDecl>(Y); 6609 if (VarX->getLinkageInternal() == VarY->getLinkageInternal()) { 6610 // During deserialization, we might compare variables before we load 6611 // their types. Assume the types will end up being the same. 6612 if (VarX->getType().isNull() || VarY->getType().isNull()) 6613 return true; 6614 6615 if (hasSameType(VarX->getType(), VarY->getType())) 6616 return true; 6617 6618 // We can get decls with different types on the redecl chain. E.g. 6619 // template <typename T> struct S { static T Var[]; }; // #1 6620 // template <typename T> T S<T>::Var[sizeof(T)]; // #2 6621 // This only happens when completing an incomplete array type.
In this case 6622 // when comparing #1 and #2 we should go through their element type. 6623 const ArrayType *VarXTy = getAsArrayType(VarX->getType()); 6624 const ArrayType *VarYTy = getAsArrayType(VarY->getType()); 6625 if (!VarXTy || !VarYTy) 6626 return false; 6627 if (VarXTy->isIncompleteArrayType() || VarYTy->isIncompleteArrayType()) 6628 return hasSameType(VarXTy->getElementType(), VarYTy->getElementType()); 6629 } 6630 return false; 6631 } 6632 6633 // Namespaces with the same name and inlinedness match. 6634 if (const auto *NamespaceX = dyn_cast<NamespaceDecl>(X)) { 6635 const auto *NamespaceY = cast<NamespaceDecl>(Y); 6636 return NamespaceX->isInline() == NamespaceY->isInline(); 6637 } 6638 6639 // Identical template names and kinds match if their template parameter lists 6640 // and patterns match. 6641 if (const auto *TemplateX = dyn_cast<TemplateDecl>(X)) { 6642 const auto *TemplateY = cast<TemplateDecl>(Y); 6643 6644 // ConceptDecl wouldn't be the same if their constraint expression differs. 6645 if (const auto *ConceptX = dyn_cast<ConceptDecl>(X)) { 6646 const auto *ConceptY = cast<ConceptDecl>(Y); 6647 if (!isSameConstraintExpr(ConceptX->getConstraintExpr(), 6648 ConceptY->getConstraintExpr())) 6649 return false; 6650 } 6651 6652 return isSameEntity(TemplateX->getTemplatedDecl(), 6653 TemplateY->getTemplatedDecl()) && 6654 isSameTemplateParameterList(TemplateX->getTemplateParameters(), 6655 TemplateY->getTemplateParameters()); 6656 } 6657 6658 // Fields with the same name and the same type match. 6659 if (const auto *FDX = dyn_cast<FieldDecl>(X)) { 6660 const auto *FDY = cast<FieldDecl>(Y); 6661 // FIXME: Also check the bitwidth is odr-equivalent, if any. 6662 return hasSameType(FDX->getType(), FDY->getType()); 6663 } 6664 6665 // Indirect fields with the same target field match. 6666 if (const auto *IFDX = dyn_cast<IndirectFieldDecl>(X)) { 6667 const auto *IFDY = cast<IndirectFieldDecl>(Y); 6668 return IFDX->getAnonField()->getCanonicalDecl() == 6669 IFDY->getAnonField()->getCanonicalDecl(); 6670 } 6671 6672 // Enumerators with the same name match. 6673 if (isa<EnumConstantDecl>(X)) 6674 // FIXME: Also check the value is odr-equivalent. 6675 return true; 6676 6677 // Using shadow declarations with the same target match. 6678 if (const auto *USX = dyn_cast<UsingShadowDecl>(X)) { 6679 const auto *USY = cast<UsingShadowDecl>(Y); 6680 return USX->getTargetDecl() == USY->getTargetDecl(); 6681 } 6682 6683 // Using declarations with the same qualifier match. (We already know that 6684 // the name matches.) 6685 if (const auto *UX = dyn_cast<UsingDecl>(X)) { 6686 const auto *UY = cast<UsingDecl>(Y); 6687 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6688 UX->hasTypename() == UY->hasTypename() && 6689 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6690 } 6691 if (const auto *UX = dyn_cast<UnresolvedUsingValueDecl>(X)) { 6692 const auto *UY = cast<UnresolvedUsingValueDecl>(Y); 6693 return isSameQualifier(UX->getQualifier(), UY->getQualifier()) && 6694 UX->isAccessDeclaration() == UY->isAccessDeclaration(); 6695 } 6696 if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) { 6697 return isSameQualifier( 6698 UX->getQualifier(), 6699 cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier()); 6700 } 6701 6702 // Using-pack declarations are only created by instantiation, and match if 6703 // they're instantiated from matching UnresolvedUsing...Decls. 
6704 if (const auto *UX = dyn_cast<UsingPackDecl>(X)) { 6705 return declaresSameEntity( 6706 UX->getInstantiatedFromUsingDecl(), 6707 cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl()); 6708 } 6709 6710 // Namespace alias definitions with the same target match. 6711 if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) { 6712 const auto *NAY = cast<NamespaceAliasDecl>(Y); 6713 return NAX->getNamespace()->Equals(NAY->getNamespace()); 6714 } 6715 6716 return false; 6717 } 6718 6719 TemplateArgument 6720 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6721 switch (Arg.getKind()) { 6722 case TemplateArgument::Null: 6723 return Arg; 6724 6725 case TemplateArgument::Expression: 6726 return Arg; 6727 6728 case TemplateArgument::Declaration: { 6729 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6730 return TemplateArgument(D, getCanonicalType(Arg.getParamTypeForDecl()), 6731 Arg.getIsDefaulted()); 6732 } 6733 6734 case TemplateArgument::NullPtr: 6735 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6736 /*isNullPtr*/ true, Arg.getIsDefaulted()); 6737 6738 case TemplateArgument::Template: 6739 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()), 6740 Arg.getIsDefaulted()); 6741 6742 case TemplateArgument::TemplateExpansion: 6743 return TemplateArgument( 6744 getCanonicalTemplateName(Arg.getAsTemplateOrTemplatePattern()), 6745 Arg.getNumTemplateExpansions(), Arg.getIsDefaulted()); 6746 6747 case TemplateArgument::Integral: 6748 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6749 6750 case TemplateArgument::Type: 6751 return TemplateArgument(getCanonicalType(Arg.getAsType()), 6752 /*isNullPtr*/ false, Arg.getIsDefaulted()); 6753 6754 case TemplateArgument::Pack: { 6755 bool AnyNonCanonArgs = false; 6756 auto CanonArgs = ::getCanonicalTemplateArguments( 6757 *this, Arg.pack_elements(), AnyNonCanonArgs); 6758 if (!AnyNonCanonArgs) 6759 return Arg; 6760 return TemplateArgument::CreatePackCopy(const_cast<ASTContext &>(*this), 6761 CanonArgs); 6762 } 6763 } 6764 6765 // Silence GCC warning 6766 llvm_unreachable("Unhandled template argument kind"); 6767 } 6768 6769 NestedNameSpecifier * 6770 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6771 if (!NNS) 6772 return nullptr; 6773 6774 switch (NNS->getKind()) { 6775 case NestedNameSpecifier::Identifier: 6776 // Canonicalize the prefix but keep the identifier the same. 6777 return NestedNameSpecifier::Create(*this, 6778 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6779 NNS->getAsIdentifier()); 6780 6781 case NestedNameSpecifier::Namespace: 6782 // A namespace is canonical; build a nested-name-specifier with 6783 // this namespace and no prefix. 6784 return NestedNameSpecifier::Create(*this, nullptr, 6785 NNS->getAsNamespace()->getOriginalNamespace()); 6786 6787 case NestedNameSpecifier::NamespaceAlias: 6788 // A namespace is canonical; build a nested-name-specifier with 6789 // this namespace and no prefix. 6790 return NestedNameSpecifier::Create(*this, nullptr, 6791 NNS->getAsNamespaceAlias()->getNamespace() 6792 ->getOriginalNamespace()); 6793 6794 // The difference between TypeSpec and TypeSpecWithTemplate is that the 6795 // latter will have the 'template' keyword when printed. 
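// (E.g., the qualifier in `typename T::template apply<U>::type` is a
// TypeSpecWithTemplate and prints with the `template` keyword, while the
// qualifier in `std::vector<int>::value_type` is a plain TypeSpec.)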
6796 case NestedNameSpecifier::TypeSpec: 6797 case NestedNameSpecifier::TypeSpecWithTemplate: { 6798 const Type *T = getCanonicalType(NNS->getAsType()); 6799 6800 // If we have some kind of dependent-named type (e.g., "typename T::type"), 6801 // break it apart into its prefix and identifier, then reconstitute those 6802 // as the canonical nested-name-specifier. This is required to canonicalize 6803 // a dependent nested-name-specifier involving typedefs of dependent-name 6804 // types, e.g., 6805 // typedef typename T::type T1; 6806 // typedef typename T1::type T2; 6807 if (const auto *DNT = T->getAs<DependentNameType>()) 6808 return NestedNameSpecifier::Create( 6809 *this, DNT->getQualifier(), 6810 const_cast<IdentifierInfo *>(DNT->getIdentifier())); 6811 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) 6812 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, 6813 const_cast<Type *>(T)); 6814 6815 // TODO: Set 'Template' parameter to true for other template types. 6816 return NestedNameSpecifier::Create(*this, nullptr, false, 6817 const_cast<Type *>(T)); 6818 } 6819 6820 case NestedNameSpecifier::Global: 6821 case NestedNameSpecifier::Super: 6822 // The global specifier and __super specifier are canonical and unique. 6823 return NNS; 6824 } 6825 6826 llvm_unreachable("Invalid NestedNameSpecifier::Kind!"); 6827 } 6828 6829 const ArrayType *ASTContext::getAsArrayType(QualType T) const { 6830 // Handle the non-qualified case efficiently. 6831 if (!T.hasLocalQualifiers()) { 6832 // Handle the common positive case fast. 6833 if (const auto *AT = dyn_cast<ArrayType>(T)) 6834 return AT; 6835 } 6836 6837 // Handle the common negative case fast. 6838 if (!isa<ArrayType>(T.getCanonicalType())) 6839 return nullptr; 6840 6841 // Apply any qualifiers from the array type to the element type. This 6842 // implements C99 6.7.3p8: "If the specification of an array type includes 6843 // any type qualifiers, the element type is so qualified, not the array type." 6844 6845 // If we get here, we either have type qualifiers on the type, or we have 6846 // sugar such as a typedef in the way. If we have type qualifiers on the type 6847 // we must propagate them down into the element type. 6848 6849 SplitQualType split = T.getSplitDesugaredType(); 6850 Qualifiers qs = split.Quals; 6851 6852 // If we have a simple case, just return now. 6853 const auto *ATy = dyn_cast<ArrayType>(split.Ty); 6854 if (!ATy || qs.empty()) 6855 return ATy; 6856 6857 // Otherwise, we have an array and we have qualifiers on it. Push the 6858 // qualifiers into the array element type and return a new array type.
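// (Sketch: for `typedef int A[4]; const A x;`, the type of `x` arrives here
// with split.Ty = `int[4]` and qs = {const}; the code below then returns a
// ConstantArrayType whose element type is `const int`, as C99 6.7.3p8
// requires.)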
6859 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6860 6861 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6862 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6863 CAT->getSizeExpr(), 6864 CAT->getSizeModifier(), 6865 CAT->getIndexTypeCVRQualifiers())); 6866 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6867 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6868 IAT->getSizeModifier(), 6869 IAT->getIndexTypeCVRQualifiers())); 6870 6871 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6872 return cast<ArrayType>( 6873 getDependentSizedArrayType(NewEltTy, 6874 DSAT->getSizeExpr(), 6875 DSAT->getSizeModifier(), 6876 DSAT->getIndexTypeCVRQualifiers(), 6877 DSAT->getBracketsRange())); 6878 6879 const auto *VAT = cast<VariableArrayType>(ATy); 6880 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6881 VAT->getSizeExpr(), 6882 VAT->getSizeModifier(), 6883 VAT->getIndexTypeCVRQualifiers(), 6884 VAT->getBracketsRange())); 6885 } 6886 6887 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6888 if (T->isArrayType() || T->isFunctionType()) 6889 return getDecayedType(T); 6890 return T; 6891 } 6892 6893 QualType ASTContext::getSignatureParameterType(QualType T) const { 6894 T = getVariableArrayDecayedType(T); 6895 T = getAdjustedParameterType(T); 6896 return T.getUnqualifiedType(); 6897 } 6898 6899 QualType ASTContext::getExceptionObjectType(QualType T) const { 6900 // C++ [except.throw]p3: 6901 // A throw-expression initializes a temporary object, called the exception 6902 // object, the type of which is determined by removing any top-level 6903 // cv-qualifiers from the static type of the operand of throw and adjusting 6904 // the type from "array of T" or "function returning T" to "pointer to T" 6905 // or "pointer to function returning T", [...] 6906 T = getVariableArrayDecayedType(T); 6907 if (T->isArrayType() || T->isFunctionType()) 6908 T = getDecayedType(T); 6909 return T.getUnqualifiedType(); 6910 } 6911 6912 /// getArrayDecayedType - Return the properly qualified result of decaying the 6913 /// specified array type to a pointer. This operation is non-trivial when 6914 /// handling typedefs etc. The canonical type of "T" must be an array type, 6915 /// this returns a pointer to a properly qualified element of the array. 6916 /// 6917 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6918 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6919 // Get the element type with 'getAsArrayType' so that we don't lose any 6920 // typedefs in the element type of the array. This also handles propagation 6921 // of type qualifiers from the array type into the element type if present 6922 // (C99 6.7.3p8). 
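// (For instance, an array declared `const char s[12]` decays to
// `const char *`, since the const lives on the element type; index-type
// qualifiers, as in `int x[restrict 4]` -> `int *restrict`, are reapplied
// to the pointer further below.)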
6923 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6924 assert(PrettyArrayType && "Not an array type!"); 6925 6926 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6927 6928 // int x[restrict 4] -> int *restrict 6929 QualType Result = getQualifiedType(PtrTy, 6930 PrettyArrayType->getIndexTypeQualifiers()); 6931 6932 // int x[_Nullable] -> int * _Nullable 6933 if (auto Nullability = Ty->getNullability()) { 6934 Result = const_cast<ASTContext *>(this)->getAttributedType( 6935 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6936 } 6937 return Result; 6938 } 6939 6940 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6941 return getBaseElementType(array->getElementType()); 6942 } 6943 6944 QualType ASTContext::getBaseElementType(QualType type) const { 6945 Qualifiers qs; 6946 while (true) { 6947 SplitQualType split = type.getSplitDesugaredType(); 6948 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6949 if (!array) break; 6950 6951 type = array->getElementType(); 6952 qs.addConsistentQualifiers(split.Quals); 6953 } 6954 6955 return getQualifiedType(type, qs); 6956 } 6957 6958 /// getConstantArrayElementCount - Returns number of constant array elements. 6959 uint64_t 6960 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6961 uint64_t ElementCount = 1; 6962 do { 6963 ElementCount *= CA->getSize().getZExtValue(); 6964 CA = dyn_cast_or_null<ConstantArrayType>( 6965 CA->getElementType()->getAsArrayTypeUnsafe()); 6966 } while (CA); 6967 return ElementCount; 6968 } 6969 6970 uint64_t ASTContext::getArrayInitLoopExprElementCount( 6971 const ArrayInitLoopExpr *AILE) const { 6972 if (!AILE) 6973 return 0; 6974 6975 uint64_t ElementCount = 1; 6976 6977 do { 6978 ElementCount *= AILE->getArraySize().getZExtValue(); 6979 AILE = dyn_cast<ArrayInitLoopExpr>(AILE->getSubExpr()); 6980 } while (AILE); 6981 6982 return ElementCount; 6983 } 6984 6985 /// getFloatingRank - Return a relative rank for floating point types. 6986 /// This routine will assert if passed a built-in type that isn't a float. 6987 static FloatingRank getFloatingRank(QualType T) { 6988 if (const auto *CT = T->getAs<ComplexType>()) 6989 return getFloatingRank(CT->getElementType()); 6990 6991 switch (T->castAs<BuiltinType>()->getKind()) { 6992 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6993 case BuiltinType::Float16: return Float16Rank; 6994 case BuiltinType::Half: return HalfRank; 6995 case BuiltinType::Float: return FloatRank; 6996 case BuiltinType::Double: return DoubleRank; 6997 case BuiltinType::LongDouble: return LongDoubleRank; 6998 case BuiltinType::Float128: return Float128Rank; 6999 case BuiltinType::BFloat16: return BFloat16Rank; 7000 case BuiltinType::Ibm128: return Ibm128Rank; 7001 } 7002 } 7003 7004 /// getFloatingTypeOrder - Compare the rank of the two specified floating 7005 /// point types, ignoring the domain of the type (i.e. 'double' == 7006 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 7007 /// LHS < RHS, return -1. 
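/// For example, given the ranks computed by getFloatingRank,
/// getFloatingTypeOrder(DoubleTy, FloatTy) == 1,
/// getFloatingTypeOrder(FloatTy, LongDoubleTy) == -1, and comparing
/// 'double' against '_Complex double' yields 0 because only the element
/// type's rank is considered.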
7008 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 7009 FloatingRank LHSR = getFloatingRank(LHS); 7010 FloatingRank RHSR = getFloatingRank(RHS); 7011 7012 if (LHSR == RHSR) 7013 return 0; 7014 if (LHSR > RHSR) 7015 return 1; 7016 return -1; 7017 } 7018 7019 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 7020 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 7021 return 0; 7022 return getFloatingTypeOrder(LHS, RHS); 7023 } 7024 7025 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 7026 /// routine will assert if passed a built-in type that isn't an integer or enum, 7027 /// or if it is not canonicalized. 7028 unsigned ASTContext::getIntegerRank(const Type *T) const { 7029 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 7030 7031 // Results in this 'losing' to any type of the same size, but winning if 7032 // larger. 7033 if (const auto *EIT = dyn_cast<BitIntType>(T)) 7034 return 0 + (EIT->getNumBits() << 3); 7035 7036 switch (cast<BuiltinType>(T)->getKind()) { 7037 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 7038 case BuiltinType::Bool: 7039 return 1 + (getIntWidth(BoolTy) << 3); 7040 case BuiltinType::Char_S: 7041 case BuiltinType::Char_U: 7042 case BuiltinType::SChar: 7043 case BuiltinType::UChar: 7044 return 2 + (getIntWidth(CharTy) << 3); 7045 case BuiltinType::Short: 7046 case BuiltinType::UShort: 7047 return 3 + (getIntWidth(ShortTy) << 3); 7048 case BuiltinType::Int: 7049 case BuiltinType::UInt: 7050 return 4 + (getIntWidth(IntTy) << 3); 7051 case BuiltinType::Long: 7052 case BuiltinType::ULong: 7053 return 5 + (getIntWidth(LongTy) << 3); 7054 case BuiltinType::LongLong: 7055 case BuiltinType::ULongLong: 7056 return 6 + (getIntWidth(LongLongTy) << 3); 7057 case BuiltinType::Int128: 7058 case BuiltinType::UInt128: 7059 return 7 + (getIntWidth(Int128Ty) << 3); 7060 7061 // "The ranks of char8_t, char16_t, char32_t, and wchar_t equal the ranks of 7062 // their underlying types" [c++20 conv.rank] 7063 case BuiltinType::Char8: 7064 return getIntegerRank(UnsignedCharTy.getTypePtr()); 7065 case BuiltinType::Char16: 7066 return getIntegerRank( 7067 getFromTargetType(Target->getChar16Type()).getTypePtr()); 7068 case BuiltinType::Char32: 7069 return getIntegerRank( 7070 getFromTargetType(Target->getChar32Type()).getTypePtr()); 7071 case BuiltinType::WChar_S: 7072 case BuiltinType::WChar_U: 7073 return getIntegerRank( 7074 getFromTargetType(Target->getWCharType()).getTypePtr()); 7075 } 7076 } 7077 7078 /// Whether this is a promotable bitfield reference according 7079 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 7080 /// 7081 /// \returns the type this bit-field will promote to, or NULL if no 7082 /// promotion occurs. 7083 QualType ASTContext::isPromotableBitField(Expr *E) const { 7084 if (E->isTypeDependent() || E->isValueDependent()) 7085 return {}; 7086 7087 // C++ [conv.prom]p5: 7088 // If the bit-field has an enumerated type, it is treated as any other 7089 // value of that type for promotion purposes. 7090 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 7091 return {}; 7092 7093 // FIXME: We should not do this unless E->refersToBitField() is true. This 7094 // matters in C where getSourceBitField() will find bit-fields for various 7095 // cases where the source expression is not a bit-field designator. 7096 7097 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 
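// (Sketch of the outcomes computed below, assuming a 32-bit int: given
// `struct S { unsigned u : 15; unsigned v : 32; long w : 40; } s;`, a use
// of `s.u` promotes to `int`, `s.v` promotes to `unsigned int` because it
// is exactly int-sized and unsigned, and `s.w` is not promotable at all.)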
7098 if (!Field) 7099 return {}; 7100 7101 QualType FT = Field->getType(); 7102 7103 uint64_t BitWidth = Field->getBitWidthValue(*this); 7104 uint64_t IntSize = getTypeSize(IntTy); 7105 // C++ [conv.prom]p5: 7106 // A prvalue for an integral bit-field can be converted to a prvalue of type 7107 // int if int can represent all the values of the bit-field; otherwise, it 7108 // can be converted to unsigned int if unsigned int can represent all the 7109 // values of the bit-field. If the bit-field is larger yet, no integral 7110 // promotion applies to it. 7111 // C11 6.3.1.1/2: 7112 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 7113 // If an int can represent all values of the original type (as restricted by 7114 // the width, for a bit-field), the value is converted to an int; otherwise, 7115 // it is converted to an unsigned int. 7116 // 7117 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 7118 // We perform that promotion here to match GCC and C++. 7119 // FIXME: C does not permit promotion of an enum bit-field whose rank is 7120 // greater than that of 'int'. We perform that promotion to match GCC. 7121 if (BitWidth < IntSize) 7122 return IntTy; 7123 7124 if (BitWidth == IntSize) 7125 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 7126 7127 // Bit-fields wider than int are not subject to promotions, and therefore act 7128 // like the base type. GCC has some weird bugs in this area that we 7129 // deliberately do not follow (GCC follows a pre-standard resolution to 7130 // C's DR315 which treats bit-width as being part of the type, and this leaks 7131 // into their semantics in some cases). 7132 return {}; 7133 } 7134 7135 /// getPromotedIntegerType - Returns the type that Promotable will 7136 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 7137 /// integer type. 7138 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 7139 assert(!Promotable.isNull()); 7140 assert(isPromotableIntegerType(Promotable)); 7141 if (const auto *ET = Promotable->getAs<EnumType>()) 7142 return ET->getDecl()->getPromotionType(); 7143 7144 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 7145 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 7146 // (3.9.1) can be converted to a prvalue of the first of the following 7147 // types that can represent all the values of its underlying type: 7148 // int, unsigned int, long int, unsigned long int, long long int, or 7149 // unsigned long long int [...] 7150 // FIXME: Is there some better way to compute this? 7151 if (BT->getKind() == BuiltinType::WChar_S || 7152 BT->getKind() == BuiltinType::WChar_U || 7153 BT->getKind() == BuiltinType::Char8 || 7154 BT->getKind() == BuiltinType::Char16 || 7155 BT->getKind() == BuiltinType::Char32) { 7156 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 7157 uint64_t FromSize = getTypeSize(BT); 7158 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 7159 LongLongTy, UnsignedLongLongTy }; 7160 for (const auto &PT : PromoteTypes) { 7161 uint64_t ToSize = getTypeSize(PT); 7162 if (FromSize < ToSize || 7163 (FromSize == ToSize && FromIsSigned == PT->isSignedIntegerType())) 7164 return PT; 7165 } 7166 llvm_unreachable("char type should fit into long long"); 7167 } 7168 } 7169 7170 // At this point, we should have a signed or unsigned integer type. 
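// (E.g., `short` promotes to `int` here; `unsigned short` promotes to `int`
// on the usual targets where int is wider, but to `unsigned int` on a
// target where int is also only 16 bits wide.)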
7171 if (Promotable->isSignedIntegerType()) 7172 return IntTy; 7173 uint64_t PromotableSize = getIntWidth(Promotable); 7174 uint64_t IntSize = getIntWidth(IntTy); 7175 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 7176 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 7177 } 7178 7179 /// Recurses into pointer/array types until it finds an objc retainable 7180 /// type and returns its ownership. 7181 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 7182 while (!T.isNull()) { 7183 if (T.getObjCLifetime() != Qualifiers::OCL_None) 7184 return T.getObjCLifetime(); 7185 if (T->isArrayType()) 7186 T = getBaseElementType(T); 7187 else if (const auto *PT = T->getAs<PointerType>()) 7188 T = PT->getPointeeType(); 7189 else if (const auto *RT = T->getAs<ReferenceType>()) 7190 T = RT->getPointeeType(); 7191 else 7192 break; 7193 } 7194 7195 return Qualifiers::OCL_None; 7196 } 7197 7198 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 7199 // Incomplete enum types are not treated as integer types. 7200 // FIXME: In C++, enum types are never integer types. 7201 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 7202 return ET->getDecl()->getIntegerType().getTypePtr(); 7203 return nullptr; 7204 } 7205 7206 /// getIntegerTypeOrder - Returns the highest ranked integer type: 7207 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 7208 /// LHS < RHS, return -1. 7209 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 7210 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 7211 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 7212 7213 // Unwrap enums to their underlying type. 7214 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 7215 LHSC = getIntegerTypeForEnum(ET); 7216 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 7217 RHSC = getIntegerTypeForEnum(ET); 7218 7219 if (LHSC == RHSC) return 0; 7220 7221 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 7222 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 7223 7224 unsigned LHSRank = getIntegerRank(LHSC); 7225 unsigned RHSRank = getIntegerRank(RHSC); 7226 7227 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 7228 if (LHSRank == RHSRank) return 0; 7229 return LHSRank > RHSRank ? 1 : -1; 7230 } 7231 7232 // Otherwise, the LHS is signed and the RHS is unsigned or vice versa. 7233 if (LHSUnsigned) { 7234 // If the unsigned [LHS] type is larger, return it. 7235 if (LHSRank >= RHSRank) 7236 return 1; 7237 7238 // If the signed type can represent all values of the unsigned type, it 7239 // wins. Because we are dealing with 2's complement and types that are 7240 // powers of two larger than each other, this is always safe. 7241 return -1; 7242 } 7243 7244 // If the unsigned [RHS] type is larger, return it. 7245 if (RHSRank >= LHSRank) 7246 return -1; 7247 7248 // If the signed type can represent all values of the unsigned type, it 7249 // wins. Because we are dealing with 2's complement and types that are 7250 // powers of two larger than each other, this is always safe.
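// (Concretely, assuming 32-bit int and 64-bit long: comparing LHS `long`
// against RHS `unsigned int` reaches this point and returns 1, because the
// wider signed type can represent every value of the narrower unsigned
// type.)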
7251 return 1; 7252 } 7253 7254 TypedefDecl *ASTContext::getCFConstantStringDecl() const { 7255 if (CFConstantStringTypeDecl) 7256 return CFConstantStringTypeDecl; 7257 7258 assert(!CFConstantStringTagDecl && 7259 "tag and typedef should be initialized together"); 7260 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); 7261 CFConstantStringTagDecl->startDefinition(); 7262 7263 struct { 7264 QualType Type; 7265 const char *Name; 7266 } Fields[5]; 7267 unsigned Count = 0; 7268 7269 /// Objective-C ABI 7270 /// 7271 /// typedef struct __NSConstantString_tag { 7272 /// const int *isa; 7273 /// int flags; 7274 /// const char *str; 7275 /// long length; 7276 /// } __NSConstantString; 7277 /// 7278 /// Swift ABI (4.1, 4.2) 7279 /// 7280 /// typedef struct __NSConstantString_tag { 7281 /// uintptr_t _cfisa; 7282 /// uintptr_t _swift_rc; 7283 /// _Atomic(uint64_t) _cfinfoa; 7284 /// const char *_ptr; 7285 /// uint32_t _length; 7286 /// } __NSConstantString; 7287 /// 7288 /// Swift ABI (5.0) 7289 /// 7290 /// typedef struct __NSConstantString_tag { 7291 /// uintptr_t _cfisa; 7292 /// uintptr_t _swift_rc; 7293 /// _Atomic(uint64_t) _cfinfoa; 7294 /// const char *_ptr; 7295 /// uintptr_t _length; 7296 /// } __NSConstantString; 7297 7298 const auto CFRuntime = getLangOpts().CFRuntime; 7299 if (static_cast<unsigned>(CFRuntime) < 7300 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { 7301 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; 7302 Fields[Count++] = { IntTy, "flags" }; 7303 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; 7304 Fields[Count++] = { LongTy, "length" }; 7305 } else { 7306 Fields[Count++] = { getUIntPtrType(), "_cfisa" }; 7307 Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; 7308 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" }; 7309 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; 7310 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || 7311 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) 7312 Fields[Count++] = { IntTy, "_ptr" }; 7313 else 7314 Fields[Count++] = { getUIntPtrType(), "_ptr" }; 7315 } 7316 7317 // Create fields 7318 for (unsigned i = 0; i < Count; ++i) { 7319 FieldDecl *Field = 7320 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), 7321 SourceLocation(), &Idents.get(Fields[i].Name), 7322 Fields[i].Type, /*TInfo=*/nullptr, 7323 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7324 Field->setAccess(AS_public); 7325 CFConstantStringTagDecl->addDecl(Field); 7326 } 7327 7328 CFConstantStringTagDecl->completeDefinition(); 7329 // This type is designed to be compatible with NSConstantString, but cannot 7330 // use the same name, since NSConstantString is an interface. 7331 auto tagType = getTagDeclType(CFConstantStringTagDecl); 7332 CFConstantStringTypeDecl = 7333 buildImplicitTypedef(tagType, "__NSConstantString"); 7334 7335 return CFConstantStringTypeDecl; 7336 } 7337 7338 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 7339 if (!CFConstantStringTagDecl) 7340 getCFConstantStringDecl(); // Build the tag and the typedef. 7341 return CFConstantStringTagDecl; 7342 } 7343 7344 // getCFConstantStringType - Return the type used for constant CFStrings. 
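// For example, under the plain Objective-C ABI a CFSTR("hi") literal is
// lowered to a global of this type whose fields are roughly
// { &__CFConstantStringClassReference, <flags>, "hi", 2 }; the exact flags
// value is a CodeGen detail and only sketched here.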
7345 QualType ASTContext::getCFConstantStringType() const { 7346 return getTypedefType(getCFConstantStringDecl()); 7347 } 7348 7349 QualType ASTContext::getObjCSuperType() const { 7350 if (ObjCSuperType.isNull()) { 7351 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 7352 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 7353 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 7354 } 7355 return ObjCSuperType; 7356 } 7357 7358 void ASTContext::setCFConstantStringType(QualType T) { 7359 const auto *TD = T->castAs<TypedefType>(); 7360 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 7361 const auto *TagType = 7362 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 7363 CFConstantStringTagDecl = TagType->getDecl(); 7364 } 7365 7366 QualType ASTContext::getBlockDescriptorType() const { 7367 if (BlockDescriptorType) 7368 return getTagDeclType(BlockDescriptorType); 7369 7370 RecordDecl *RD; 7371 // FIXME: Needs the FlagAppleBlock bit. 7372 RD = buildImplicitRecord("__block_descriptor"); 7373 RD->startDefinition(); 7374 7375 QualType FieldTypes[] = { 7376 UnsignedLongTy, 7377 UnsignedLongTy, 7378 }; 7379 7380 static const char *const FieldNames[] = { 7381 "reserved", 7382 "Size" 7383 }; 7384 7385 for (size_t i = 0; i < 2; ++i) { 7386 FieldDecl *Field = FieldDecl::Create( 7387 *this, RD, SourceLocation(), SourceLocation(), 7388 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7389 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 7390 Field->setAccess(AS_public); 7391 RD->addDecl(Field); 7392 } 7393 7394 RD->completeDefinition(); 7395 7396 BlockDescriptorType = RD; 7397 7398 return getTagDeclType(BlockDescriptorType); 7399 } 7400 7401 QualType ASTContext::getBlockDescriptorExtendedType() const { 7402 if (BlockDescriptorExtendedType) 7403 return getTagDeclType(BlockDescriptorExtendedType); 7404 7405 RecordDecl *RD; 7406 // FIXME: Needs the FlagAppleBlock bit. 
7407 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 7408 RD->startDefinition(); 7409 7410 QualType FieldTypes[] = { 7411 UnsignedLongTy, 7412 UnsignedLongTy, 7413 getPointerType(VoidPtrTy), 7414 getPointerType(VoidPtrTy) 7415 }; 7416 7417 static const char *const FieldNames[] = { 7418 "reserved", 7419 "Size", 7420 "CopyFuncPtr", 7421 "DestroyFuncPtr" 7422 }; 7423 7424 for (size_t i = 0; i < 4; ++i) { 7425 FieldDecl *Field = FieldDecl::Create( 7426 *this, RD, SourceLocation(), SourceLocation(), 7427 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 7428 /*BitWidth=*/nullptr, 7429 /*Mutable=*/false, ICIS_NoInit); 7430 Field->setAccess(AS_public); 7431 RD->addDecl(Field); 7432 } 7433 7434 RD->completeDefinition(); 7435 7436 BlockDescriptorExtendedType = RD; 7437 return getTagDeclType(BlockDescriptorExtendedType); 7438 } 7439 7440 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 7441 const auto *BT = dyn_cast<BuiltinType>(T); 7442 7443 if (!BT) { 7444 if (isa<PipeType>(T)) 7445 return OCLTK_Pipe; 7446 7447 return OCLTK_Default; 7448 } 7449 7450 switch (BT->getKind()) { 7451 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7452 case BuiltinType::Id: \ 7453 return OCLTK_Image; 7454 #include "clang/Basic/OpenCLImageTypes.def" 7455 7456 case BuiltinType::OCLClkEvent: 7457 return OCLTK_ClkEvent; 7458 7459 case BuiltinType::OCLEvent: 7460 return OCLTK_Event; 7461 7462 case BuiltinType::OCLQueue: 7463 return OCLTK_Queue; 7464 7465 case BuiltinType::OCLReserveID: 7466 return OCLTK_ReserveID; 7467 7468 case BuiltinType::OCLSampler: 7469 return OCLTK_Sampler; 7470 7471 default: 7472 return OCLTK_Default; 7473 } 7474 } 7475 7476 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 7477 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 7478 } 7479 7480 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 7481 /// requires copy/dispose. Note that this must match the logic 7482 /// in buildByrefHelpers. 7483 bool ASTContext::BlockRequiresCopying(QualType Ty, 7484 const VarDecl *D) { 7485 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 7486 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 7487 if (!copyExpr && record->hasTrivialDestructor()) return false; 7488 7489 return true; 7490 } 7491 7492 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 7493 // move or destroy. 7494 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 7495 return true; 7496 7497 if (!Ty->isObjCRetainableType()) return false; 7498 7499 Qualifiers qs = Ty.getQualifiers(); 7500 7501 // If we have lifetime, that dominates. 7502 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 7503 switch (lifetime) { 7504 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 7505 7506 // These are just bits as far as the runtime is concerned. 7507 case Qualifiers::OCL_ExplicitNone: 7508 case Qualifiers::OCL_Autoreleasing: 7509 return false; 7510 7511 // These cases should have been taken care of when checking the type's 7512 // non-triviality. 
7513 case Qualifiers::OCL_Weak: 7514 case Qualifiers::OCL_Strong: 7515 llvm_unreachable("impossible"); 7516 } 7517 llvm_unreachable("fell out of lifetime switch!"); 7518 } 7519 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 7520 Ty->isObjCObjectPointerType()); 7521 } 7522 7523 bool ASTContext::getByrefLifetime(QualType Ty, 7524 Qualifiers::ObjCLifetime &LifeTime, 7525 bool &HasByrefExtendedLayout) const { 7526 if (!getLangOpts().ObjC || 7527 getLangOpts().getGC() != LangOptions::NonGC) 7528 return false; 7529 7530 HasByrefExtendedLayout = false; 7531 if (Ty->isRecordType()) { 7532 HasByrefExtendedLayout = true; 7533 LifeTime = Qualifiers::OCL_None; 7534 } else if ((LifeTime = Ty.getObjCLifetime())) { 7535 // Honor the ARC qualifiers. 7536 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 7537 // The MRR rule. 7538 LifeTime = Qualifiers::OCL_ExplicitNone; 7539 } else { 7540 LifeTime = Qualifiers::OCL_None; 7541 } 7542 return true; 7543 } 7544 7545 CanQualType ASTContext::getNSUIntegerType() const { 7546 assert(Target && "Expected target to be initialized"); 7547 const llvm::Triple &T = Target->getTriple(); 7548 // Windows is LLP64 rather than LP64 7549 if (T.isOSWindows() && T.isArch64Bit()) 7550 return UnsignedLongLongTy; 7551 return UnsignedLongTy; 7552 } 7553 7554 CanQualType ASTContext::getNSIntegerType() const { 7555 assert(Target && "Expected target to be initialized"); 7556 const llvm::Triple &T = Target->getTriple(); 7557 // Windows is LLP64 rather than LP64 7558 if (T.isOSWindows() && T.isArch64Bit()) 7559 return LongLongTy; 7560 return LongTy; 7561 } 7562 7563 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7564 if (!ObjCInstanceTypeDecl) 7565 ObjCInstanceTypeDecl = 7566 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7567 return ObjCInstanceTypeDecl; 7568 } 7569 7570 // This returns true if a type has been typedefed to BOOL: 7571 // typedef <type> BOOL; 7572 static bool isTypeTypedefedAsBOOL(QualType T) { 7573 if (const auto *TT = dyn_cast<TypedefType>(T)) 7574 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7575 return II->isStr("BOOL"); 7576 7577 return false; 7578 } 7579 7580 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7581 /// purpose. 7582 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7583 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7584 return CharUnits::Zero(); 7585 7586 CharUnits sz = getTypeSizeInChars(type); 7587 7588 // Make all integer and enum types at least as large as an int 7589 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7590 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7591 // Treat arrays as pointers, since that's how they're passed in. 7592 else if (type->isArrayType()) 7593 sz = getTypeSizeInChars(VoidPtrTy); 7594 return sz; 7595 } 7596 7597 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7598 return getTargetInfo().getCXXABI().isMicrosoft() && 7599 VD->isStaticDataMember() && 7600 VD->getType()->isIntegralOrEnumerationType() && 7601 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7602 } 7603 7604 ASTContext::InlineVariableDefinitionKind 7605 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7606 if (!VD->isInline()) 7607 return InlineVariableDefinitionKind::None; 7608 7609 // In almost all cases, it's a weak definition. 
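// (Sketch: `inline int a;` at namespace scope is Weak via the check below;
// for `struct S { static constexpr int x = 1; };` the implicitly-inline
// member `x` stays WeakUnknown until a file-scope redeclaration
// `constexpr int S::x;` is seen, which makes the definition Strong.)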
7610 auto *First = VD->getFirstDecl(); 7611 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7612 return InlineVariableDefinitionKind::Weak; 7613 7614 // If there's a file-context declaration in this translation unit, it's a 7615 // non-discardable definition. 7616 for (auto *D : VD->redecls()) 7617 if (D->getLexicalDeclContext()->isFileContext() && 7618 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7619 return InlineVariableDefinitionKind::Strong; 7620 7621 // If we've not seen one yet, we don't know. 7622 return InlineVariableDefinitionKind::WeakUnknown; 7623 } 7624 7625 static std::string charUnitsToString(const CharUnits &CU) { 7626 return llvm::itostr(CU.getQuantity()); 7627 } 7628 7629 /// getObjCEncodingForBlock - Return the encoded type for this block 7630 /// declaration. 7631 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7632 std::string S; 7633 7634 const BlockDecl *Decl = Expr->getBlockDecl(); 7635 QualType BlockTy = 7636 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7637 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7638 // Encode result type. 7639 if (getLangOpts().EncodeExtendedBlockSig) 7640 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7641 true /*Extended*/); 7642 else 7643 getObjCEncodingForType(BlockReturnTy, S); 7644 // Compute size of all parameters. 7645 // Start with computing size of a pointer in number of bytes. 7646 // FIXME: There might(should) be a better way of doing this computation! 7647 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7648 CharUnits ParmOffset = PtrSize; 7649 for (auto *PI : Decl->parameters()) { 7650 QualType PType = PI->getType(); 7651 CharUnits sz = getObjCEncodingTypeSize(PType); 7652 if (sz.isZero()) 7653 continue; 7654 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7655 ParmOffset += sz; 7656 } 7657 // Size of the argument frame 7658 S += charUnitsToString(ParmOffset); 7659 // Block pointer and offset. 7660 S += "@?0"; 7661 7662 // Argument types. 7663 ParmOffset = PtrSize; 7664 for (auto *PVDecl : Decl->parameters()) { 7665 QualType PType = PVDecl->getOriginalType(); 7666 if (const auto *AT = 7667 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7668 // Use array's original type only if it has known number of 7669 // elements. 7670 if (!isa<ConstantArrayType>(AT)) 7671 PType = PVDecl->getType(); 7672 } else if (PType->isFunctionType()) 7673 PType = PVDecl->getType(); 7674 if (getLangOpts().EncodeExtendedBlockSig) 7675 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7676 S, true /*Extended*/); 7677 else 7678 getObjCEncodingForType(PType, S); 7679 S += charUnitsToString(ParmOffset); 7680 ParmOffset += getObjCEncodingTypeSize(PType); 7681 } 7682 7683 return S; 7684 } 7685 7686 std::string 7687 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7688 std::string S; 7689 // Encode result type. 7690 getObjCEncodingForType(Decl->getReturnType(), S); 7691 CharUnits ParmOffset; 7692 // Compute size of all parameters. 7693 for (auto *PI : Decl->parameters()) { 7694 QualType PType = PI->getType(); 7695 CharUnits sz = getObjCEncodingTypeSize(PType); 7696 if (sz.isZero()) 7697 continue; 7698 7699 assert(sz.isPositive() && 7700 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7701 ParmOffset += sz; 7702 } 7703 S += charUnitsToString(ParmOffset); 7704 ParmOffset = CharUnits::Zero(); 7705 7706 // Argument types. 
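  // Illustrative result (sizes and offsets are target-dependent): on a
  // 32-bit target, 'int f(int, char *)' encodes roughly as "i8i0*4" --
  // the return type, the total argument frame size, then each parameter's
  // type followed by its byte offset.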
7707 for (auto *PVDecl : Decl->parameters()) { 7708 QualType PType = PVDecl->getOriginalType(); 7709 if (const auto *AT = 7710 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7711 // Use array's original type only if it has known number of 7712 // elements. 7713 if (!isa<ConstantArrayType>(AT)) 7714 PType = PVDecl->getType(); 7715 } else if (PType->isFunctionType()) 7716 PType = PVDecl->getType(); 7717 getObjCEncodingForType(PType, S); 7718 S += charUnitsToString(ParmOffset); 7719 ParmOffset += getObjCEncodingTypeSize(PType); 7720 } 7721 7722 return S; 7723 } 7724 7725 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7726 /// method parameter or return type. If Extended, include class names and 7727 /// block object types. 7728 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7729 QualType T, std::string& S, 7730 bool Extended) const { 7731 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7732 getObjCEncodingForTypeQualifier(QT, S); 7733 // Encode parameter type. 7734 ObjCEncOptions Options = ObjCEncOptions() 7735 .setExpandPointedToStructures() 7736 .setExpandStructures() 7737 .setIsOutermostType(); 7738 if (Extended) 7739 Options.setEncodeBlockParameters().setEncodeClassNames(); 7740 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7741 } 7742 7743 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7744 /// declaration. 7745 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7746 bool Extended) const { 7747 // FIXME: This is not very efficient. 7748 // Encode return type. 7749 std::string S; 7750 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7751 Decl->getReturnType(), S, Extended); 7752 // Compute size of all parameters. 7753 // Start with computing size of a pointer in number of bytes. 7754 // FIXME: There might(should) be a better way of doing this computation! 7755 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7756 // The first two arguments (self and _cmd) are pointers; account for 7757 // their size. 7758 CharUnits ParmOffset = 2 * PtrSize; 7759 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7760 E = Decl->sel_param_end(); PI != E; ++PI) { 7761 QualType PType = (*PI)->getType(); 7762 CharUnits sz = getObjCEncodingTypeSize(PType); 7763 if (sz.isZero()) 7764 continue; 7765 7766 assert(sz.isPositive() && 7767 "getObjCEncodingForMethodDecl - Incomplete param type"); 7768 ParmOffset += sz; 7769 } 7770 S += charUnitsToString(ParmOffset); 7771 S += "@0:"; 7772 S += charUnitsToString(PtrSize); 7773 7774 // Argument types. 7775 ParmOffset = 2 * PtrSize; 7776 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7777 E = Decl->sel_param_end(); PI != E; ++PI) { 7778 const ParmVarDecl *PVDecl = *PI; 7779 QualType PType = PVDecl->getOriginalType(); 7780 if (const auto *AT = 7781 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7782 // Use array's original type only if it has known number of 7783 // elements. 
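      // Illustrative: a parameter declared 'int buf[4]' keeps its array
      // encoding "[4i]", while 'int buf[]' uses the decayed pointer type
      // and encodes as "^i".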
7784 if (!isa<ConstantArrayType>(AT)) 7785 PType = PVDecl->getType(); 7786 } else if (PType->isFunctionType()) 7787 PType = PVDecl->getType(); 7788 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7789 PType, S, Extended); 7790 S += charUnitsToString(ParmOffset); 7791 ParmOffset += getObjCEncodingTypeSize(PType); 7792 } 7793 7794 return S; 7795 } 7796 7797 ObjCPropertyImplDecl * 7798 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7799 const ObjCPropertyDecl *PD, 7800 const Decl *Container) const { 7801 if (!Container) 7802 return nullptr; 7803 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7804 for (auto *PID : CID->property_impls()) 7805 if (PID->getPropertyDecl() == PD) 7806 return PID; 7807 } else { 7808 const auto *OID = cast<ObjCImplementationDecl>(Container); 7809 for (auto *PID : OID->property_impls()) 7810 if (PID->getPropertyDecl() == PD) 7811 return PID; 7812 } 7813 return nullptr; 7814 } 7815 7816 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7817 /// property declaration. If non-NULL, Container must be either an 7818 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7819 /// NULL when getting encodings for protocol properties. 7820 /// Property attributes are stored as a comma-delimited C string. The simple 7821 /// attributes readonly and bycopy are encoded as single characters. The 7822 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7823 /// encoded as single characters, followed by an identifier. Property types 7824 /// are also encoded as a parametrized attribute. The characters used to encode 7825 /// these attributes are defined by the following enumeration: 7826 /// @code 7827 /// enum PropertyAttributes { 7828 /// kPropertyReadOnly = 'R', // property is read-only. 7829 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7830 /// kPropertyByref = '&', // property is a reference to the value last assigned 7831 /// kPropertyDynamic = 'D', // property is dynamic 7832 /// kPropertyGetter = 'G', // followed by getter selector name 7833 /// kPropertySetter = 'S', // followed by setter selector name 7834 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7835 /// kPropertyType = 'T' // followed by old-style type encoding. 7836 /// kPropertyWeak = 'W' // 'weak' property 7837 /// kPropertyStrong = 'P' // property GC'able 7838 /// kPropertyNonAtomic = 'N' // property non-atomic 7839 /// kPropertyOptional = '?' // property optional 7840 /// }; 7841 /// @endcode 7842 std::string 7843 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7844 const Decl *Container) const { 7845 // Collect information from the property implementation decl(s). 7846 bool Dynamic = false; 7847 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7848 7849 if (ObjCPropertyImplDecl *PropertyImpDecl = 7850 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7851 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7852 Dynamic = true; 7853 else 7854 SynthesizePID = PropertyImpDecl; 7855 } 7856 7857 // FIXME: This is not very efficient. 7858 std::string S = "T"; 7859 7860 // Encode result type. 7861 // GCC has some special rules regarding encoding of properties which 7862 // closely resembles encoding of ivars. 
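  // Illustrative: '@property (nonatomic, copy) NSString *name;' synthesized
  // onto the ivar '_name' produces the string  T@"NSString",C,N,V_name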
7863 getObjCEncodingForPropertyType(PD->getType(), S); 7864 7865 if (PD->isOptional()) 7866 S += ",?"; 7867 7868 if (PD->isReadOnly()) { 7869 S += ",R"; 7870 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7871 S += ",C"; 7872 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7873 S += ",&"; 7874 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7875 S += ",W"; 7876 } else { 7877 switch (PD->getSetterKind()) { 7878 case ObjCPropertyDecl::Assign: break; 7879 case ObjCPropertyDecl::Copy: S += ",C"; break; 7880 case ObjCPropertyDecl::Retain: S += ",&"; break; 7881 case ObjCPropertyDecl::Weak: S += ",W"; break; 7882 } 7883 } 7884 7885 // It really isn't clear at all what this means, since properties 7886 // are "dynamic by default". 7887 if (Dynamic) 7888 S += ",D"; 7889 7890 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7891 S += ",N"; 7892 7893 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7894 S += ",G"; 7895 S += PD->getGetterName().getAsString(); 7896 } 7897 7898 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7899 S += ",S"; 7900 S += PD->getSetterName().getAsString(); 7901 } 7902 7903 if (SynthesizePID) { 7904 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7905 S += ",V"; 7906 S += OID->getNameAsString(); 7907 } 7908 7909 // FIXME: OBJCGC: weak & strong 7910 return S; 7911 } 7912 7913 /// getLegacyIntegralTypeEncoding - 7914 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7915 /// 'l' or 'L' , but not always. For typedefs, we need to use 7916 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7917 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7918 if (PointeeTy->getAs<TypedefType>()) { 7919 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7920 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7921 PointeeTy = UnsignedIntTy; 7922 else 7923 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7924 PointeeTy = IntTy; 7925 } 7926 } 7927 } 7928 7929 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7930 const FieldDecl *Field, 7931 QualType *NotEncodedT) const { 7932 // We follow the behavior of gcc, expanding structures which are 7933 // directly pointed to, and expanding embedded structures. Note that 7934 // these rules are sufficient to prevent recursive encoding of the 7935 // same type. 7936 getObjCEncodingForTypeImpl(T, S, 7937 ObjCEncOptions() 7938 .setExpandPointedToStructures() 7939 .setExpandStructures() 7940 .setIsOutermostType(), 7941 Field, NotEncodedT); 7942 } 7943 7944 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7945 std::string& S) const { 7946 // Encode result type. 7947 // GCC has some special rules regarding encoding of properties which 7948 // closely resembles encoding of ivars. 
7949 getObjCEncodingForTypeImpl(T, S, 7950 ObjCEncOptions() 7951 .setExpandPointedToStructures() 7952 .setExpandStructures() 7953 .setIsOutermostType() 7954 .setEncodingProperty(), 7955 /*Field=*/nullptr); 7956 } 7957 7958 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7959 const BuiltinType *BT) { 7960 BuiltinType::Kind kind = BT->getKind(); 7961 switch (kind) { 7962 case BuiltinType::Void: return 'v'; 7963 case BuiltinType::Bool: return 'B'; 7964 case BuiltinType::Char8: 7965 case BuiltinType::Char_U: 7966 case BuiltinType::UChar: return 'C'; 7967 case BuiltinType::Char16: 7968 case BuiltinType::UShort: return 'S'; 7969 case BuiltinType::Char32: 7970 case BuiltinType::UInt: return 'I'; 7971 case BuiltinType::ULong: 7972 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7973 case BuiltinType::UInt128: return 'T'; 7974 case BuiltinType::ULongLong: return 'Q'; 7975 case BuiltinType::Char_S: 7976 case BuiltinType::SChar: return 'c'; 7977 case BuiltinType::Short: return 's'; 7978 case BuiltinType::WChar_S: 7979 case BuiltinType::WChar_U: 7980 case BuiltinType::Int: return 'i'; 7981 case BuiltinType::Long: 7982 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7983 case BuiltinType::LongLong: return 'q'; 7984 case BuiltinType::Int128: return 't'; 7985 case BuiltinType::Float: return 'f'; 7986 case BuiltinType::Double: return 'd'; 7987 case BuiltinType::LongDouble: return 'D'; 7988 case BuiltinType::NullPtr: return '*'; // like char* 7989 7990 case BuiltinType::BFloat16: 7991 case BuiltinType::Float16: 7992 case BuiltinType::Float128: 7993 case BuiltinType::Ibm128: 7994 case BuiltinType::Half: 7995 case BuiltinType::ShortAccum: 7996 case BuiltinType::Accum: 7997 case BuiltinType::LongAccum: 7998 case BuiltinType::UShortAccum: 7999 case BuiltinType::UAccum: 8000 case BuiltinType::ULongAccum: 8001 case BuiltinType::ShortFract: 8002 case BuiltinType::Fract: 8003 case BuiltinType::LongFract: 8004 case BuiltinType::UShortFract: 8005 case BuiltinType::UFract: 8006 case BuiltinType::ULongFract: 8007 case BuiltinType::SatShortAccum: 8008 case BuiltinType::SatAccum: 8009 case BuiltinType::SatLongAccum: 8010 case BuiltinType::SatUShortAccum: 8011 case BuiltinType::SatUAccum: 8012 case BuiltinType::SatULongAccum: 8013 case BuiltinType::SatShortFract: 8014 case BuiltinType::SatFract: 8015 case BuiltinType::SatLongFract: 8016 case BuiltinType::SatUShortFract: 8017 case BuiltinType::SatUFract: 8018 case BuiltinType::SatULongFract: 8019 // FIXME: potentially need @encodes for these! 8020 return ' '; 8021 8022 #define SVE_TYPE(Name, Id, SingletonId) \ 8023 case BuiltinType::Id: 8024 #include "clang/Basic/AArch64SVEACLETypes.def" 8025 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8026 #include "clang/Basic/RISCVVTypes.def" 8027 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 8028 #include "clang/Basic/WebAssemblyReferenceTypes.def" 8029 { 8030 DiagnosticsEngine &Diags = C->getDiagnostics(); 8031 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 8032 "cannot yet @encode type %0"); 8033 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 8034 return ' '; 8035 } 8036 8037 case BuiltinType::ObjCId: 8038 case BuiltinType::ObjCClass: 8039 case BuiltinType::ObjCSel: 8040 llvm_unreachable("@encoding ObjC primitive type"); 8041 8042 // OpenCL and placeholder types don't need @encodings. 
8043 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 8044 case BuiltinType::Id: 8045 #include "clang/Basic/OpenCLImageTypes.def" 8046 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 8047 case BuiltinType::Id: 8048 #include "clang/Basic/OpenCLExtensionTypes.def" 8049 case BuiltinType::OCLEvent: 8050 case BuiltinType::OCLClkEvent: 8051 case BuiltinType::OCLQueue: 8052 case BuiltinType::OCLReserveID: 8053 case BuiltinType::OCLSampler: 8054 case BuiltinType::Dependent: 8055 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 8056 case BuiltinType::Id: 8057 #include "clang/Basic/PPCTypes.def" 8058 #define BUILTIN_TYPE(KIND, ID) 8059 #define PLACEHOLDER_TYPE(KIND, ID) \ 8060 case BuiltinType::KIND: 8061 #include "clang/AST/BuiltinTypes.def" 8062 llvm_unreachable("invalid builtin type for @encode"); 8063 } 8064 llvm_unreachable("invalid BuiltinType::Kind value"); 8065 } 8066 8067 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 8068 EnumDecl *Enum = ET->getDecl(); 8069 8070 // The encoding of an non-fixed enum type is always 'i', regardless of size. 8071 if (!Enum->isFixed()) 8072 return 'i'; 8073 8074 // The encoding of a fixed enum type matches its fixed underlying type. 8075 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 8076 return getObjCEncodingForPrimitiveType(C, BT); 8077 } 8078 8079 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 8080 QualType T, const FieldDecl *FD) { 8081 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 8082 S += 'b'; 8083 // The NeXT runtime encodes bit fields as b followed by the number of bits. 8084 // The GNU runtime requires more information; bitfields are encoded as b, 8085 // then the offset (in bits) of the first element, then the type of the 8086 // bitfield, then the size in bits. For example, in this structure: 8087 // 8088 // struct 8089 // { 8090 // int integer; 8091 // int flags:2; 8092 // }; 8093 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 8094 // runtime, but b32i2 for the GNU runtime. The reason for this extra 8095 // information is not especially sensible, but we're stuck with it for 8096 // compatibility with GCC, although providing it breaks anything that 8097 // actually uses runtime introspection and wants to work on both runtimes... 8098 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 8099 uint64_t Offset; 8100 8101 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 8102 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 8103 IVD); 8104 } else { 8105 const RecordDecl *RD = FD->getParent(); 8106 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 8107 Offset = RL.getFieldOffset(FD->getFieldIndex()); 8108 } 8109 8110 S += llvm::utostr(Offset); 8111 8112 if (const auto *ET = T->getAs<EnumType>()) 8113 S += ObjCEncodingForEnumType(Ctx, ET); 8114 else { 8115 const auto *BT = T->castAs<BuiltinType>(); 8116 S += getObjCEncodingForPrimitiveType(Ctx, BT); 8117 } 8118 } 8119 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 8120 } 8121 8122 // Helper function for determining whether the encoded type string would include 8123 // a template specialization type. 
8124 static bool hasTemplateSpecializationInEncodedString(const Type *T, 8125 bool VisitBasesAndFields) { 8126 T = T->getBaseElementTypeUnsafe(); 8127 8128 if (auto *PT = T->getAs<PointerType>()) 8129 return hasTemplateSpecializationInEncodedString( 8130 PT->getPointeeType().getTypePtr(), false); 8131 8132 auto *CXXRD = T->getAsCXXRecordDecl(); 8133 8134 if (!CXXRD) 8135 return false; 8136 8137 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 8138 return true; 8139 8140 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 8141 return false; 8142 8143 for (const auto &B : CXXRD->bases()) 8144 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 8145 true)) 8146 return true; 8147 8148 for (auto *FD : CXXRD->fields()) 8149 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 8150 true)) 8151 return true; 8152 8153 return false; 8154 } 8155 8156 // FIXME: Use SmallString for accumulating string. 8157 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 8158 const ObjCEncOptions Options, 8159 const FieldDecl *FD, 8160 QualType *NotEncodedT) const { 8161 CanQualType CT = getCanonicalType(T); 8162 switch (CT->getTypeClass()) { 8163 case Type::Builtin: 8164 case Type::Enum: 8165 if (FD && FD->isBitField()) 8166 return EncodeBitField(this, S, T, FD); 8167 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 8168 S += getObjCEncodingForPrimitiveType(this, BT); 8169 else 8170 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 8171 return; 8172 8173 case Type::Complex: 8174 S += 'j'; 8175 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 8176 ObjCEncOptions(), 8177 /*Field=*/nullptr); 8178 return; 8179 8180 case Type::Atomic: 8181 S += 'A'; 8182 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 8183 ObjCEncOptions(), 8184 /*Field=*/nullptr); 8185 return; 8186 8187 // encoding for pointer or reference types. 8188 case Type::Pointer: 8189 case Type::LValueReference: 8190 case Type::RValueReference: { 8191 QualType PointeeTy; 8192 if (isa<PointerType>(CT)) { 8193 const auto *PT = T->castAs<PointerType>(); 8194 if (PT->isObjCSelType()) { 8195 S += ':'; 8196 return; 8197 } 8198 PointeeTy = PT->getPointeeType(); 8199 } else { 8200 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 8201 } 8202 8203 bool isReadOnly = false; 8204 // For historical/compatibility reasons, the read-only qualifier of the 8205 // pointee gets emitted _before_ the '^'. The read-only qualifier of 8206 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 8207 // Also, do not emit the 'r' for anything but the outermost type! 8208 if (T->getAs<TypedefType>()) { 8209 if (Options.IsOutermostType() && T.isConstQualified()) { 8210 isReadOnly = true; 8211 S += 'r'; 8212 } 8213 } else if (Options.IsOutermostType()) { 8214 QualType P = PointeeTy; 8215 while (auto PT = P->getAs<PointerType>()) 8216 P = PT->getPointeeType(); 8217 if (P.isConstQualified()) { 8218 isReadOnly = true; 8219 S += 'r'; 8220 } 8221 } 8222 if (isReadOnly) { 8223 // Another legacy compatibility encoding. Some ObjC qualifier and type 8224 // combinations need to be rearranged. 8225 // Rewrite "in const" from "nr" to "rn" 8226 if (StringRef(S).ends_with("nr")) 8227 S.replace(S.end()-2, S.end(), "rn"); 8228 } 8229 8230 if (PointeeTy->isCharType()) { 8231 // char pointer types should be encoded as '*' unless it is a 8232 // type that has been typedef'd to 'BOOL'. 
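      // Illustrative: '@encode(char *)' yields "*", whereas a pointer to
      // ObjC's BOOL (a typedef of signed char on most targets) falls
      // through below and comes out as "^c".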
8233 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 8234 S += '*'; 8235 return; 8236 } 8237 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 8238 // GCC binary compat: Need to convert "struct objc_class *" to "#". 8239 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 8240 S += '#'; 8241 return; 8242 } 8243 // GCC binary compat: Need to convert "struct objc_object *" to "@". 8244 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 8245 S += '@'; 8246 return; 8247 } 8248 // If the encoded string for the class includes template names, just emit 8249 // "^v" for pointers to the class. 8250 if (getLangOpts().CPlusPlus && 8251 (!getLangOpts().EncodeCXXClassTemplateSpec && 8252 hasTemplateSpecializationInEncodedString( 8253 RTy, Options.ExpandPointedToStructures()))) { 8254 S += "^v"; 8255 return; 8256 } 8257 // fall through... 8258 } 8259 S += '^'; 8260 getLegacyIntegralTypeEncoding(PointeeTy); 8261 8262 ObjCEncOptions NewOptions; 8263 if (Options.ExpandPointedToStructures()) 8264 NewOptions.setExpandStructures(); 8265 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 8266 /*Field=*/nullptr, NotEncodedT); 8267 return; 8268 } 8269 8270 case Type::ConstantArray: 8271 case Type::IncompleteArray: 8272 case Type::VariableArray: { 8273 const auto *AT = cast<ArrayType>(CT); 8274 8275 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 8276 // Incomplete arrays are encoded as a pointer to the array element. 8277 S += '^'; 8278 8279 getObjCEncodingForTypeImpl( 8280 AT->getElementType(), S, 8281 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 8282 } else { 8283 S += '['; 8284 8285 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 8286 S += llvm::utostr(CAT->getSize().getZExtValue()); 8287 else { 8288 //Variable length arrays are encoded as a regular array with 0 elements. 8289 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 8290 "Unknown array type!"); 8291 S += '0'; 8292 } 8293 8294 getObjCEncodingForTypeImpl( 8295 AT->getElementType(), S, 8296 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 8297 NotEncodedT); 8298 S += ']'; 8299 } 8300 return; 8301 } 8302 8303 case Type::FunctionNoProto: 8304 case Type::FunctionProto: 8305 S += '?'; 8306 return; 8307 8308 case Type::Record: { 8309 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 8310 S += RDecl->isUnion() ? '(' : '{'; 8311 // Anonymous structures print as '?' 8312 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 8313 S += II->getName(); 8314 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 8315 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 8316 llvm::raw_string_ostream OS(S); 8317 printTemplateArgumentList(OS, TemplateArgs.asArray(), 8318 getPrintingPolicy()); 8319 } 8320 } else { 8321 S += '?'; 8322 } 8323 if (Options.ExpandStructures()) { 8324 S += '='; 8325 if (!RDecl->isUnion()) { 8326 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 8327 } else { 8328 for (const auto *Field : RDecl->fields()) { 8329 if (FD) { 8330 S += '"'; 8331 S += Field->getNameAsString(); 8332 S += '"'; 8333 } 8334 8335 // Special case bit-fields. 
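          // (EncodeBitField emits "b<width>" for the NeXT runtime and
          // "b<offset><type><width>" for the GNU runtimes.)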
8336 if (Field->isBitField()) { 8337 getObjCEncodingForTypeImpl(Field->getType(), S, 8338 ObjCEncOptions().setExpandStructures(), 8339 Field); 8340 } else { 8341 QualType qt = Field->getType(); 8342 getLegacyIntegralTypeEncoding(qt); 8343 getObjCEncodingForTypeImpl( 8344 qt, S, 8345 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 8346 NotEncodedT); 8347 } 8348 } 8349 } 8350 } 8351 S += RDecl->isUnion() ? ')' : '}'; 8352 return; 8353 } 8354 8355 case Type::BlockPointer: { 8356 const auto *BT = T->castAs<BlockPointerType>(); 8357 S += "@?"; // Unlike a pointer-to-function, which is "^?". 8358 if (Options.EncodeBlockParameters()) { 8359 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 8360 8361 S += '<'; 8362 // Block return type 8363 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 8364 Options.forComponentType(), FD, NotEncodedT); 8365 // Block self 8366 S += "@?"; 8367 // Block parameters 8368 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 8369 for (const auto &I : FPT->param_types()) 8370 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 8371 NotEncodedT); 8372 } 8373 S += '>'; 8374 } 8375 return; 8376 } 8377 8378 case Type::ObjCObject: { 8379 // hack to match legacy encoding of *id and *Class 8380 QualType Ty = getObjCObjectPointerType(CT); 8381 if (Ty->isObjCIdType()) { 8382 S += "{objc_object=}"; 8383 return; 8384 } 8385 else if (Ty->isObjCClassType()) { 8386 S += "{objc_class=}"; 8387 return; 8388 } 8389 // TODO: Double check to make sure this intentionally falls through. 8390 [[fallthrough]]; 8391 } 8392 8393 case Type::ObjCInterface: { 8394 // Ignore protocol qualifiers when mangling at this level. 8395 // @encode(class_name) 8396 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 8397 S += '{'; 8398 S += OI->getObjCRuntimeNameAsString(); 8399 if (Options.ExpandStructures()) { 8400 S += '='; 8401 SmallVector<const ObjCIvarDecl*, 32> Ivars; 8402 DeepCollectObjCIvars(OI, true, Ivars); 8403 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 8404 const FieldDecl *Field = Ivars[i]; 8405 if (Field->isBitField()) 8406 getObjCEncodingForTypeImpl(Field->getType(), S, 8407 ObjCEncOptions().setExpandStructures(), 8408 Field); 8409 else 8410 getObjCEncodingForTypeImpl(Field->getType(), S, 8411 ObjCEncOptions().setExpandStructures(), FD, 8412 NotEncodedT); 8413 } 8414 } 8415 S += '}'; 8416 return; 8417 } 8418 8419 case Type::ObjCObjectPointer: { 8420 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 8421 if (OPT->isObjCIdType()) { 8422 S += '@'; 8423 return; 8424 } 8425 8426 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 8427 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 8428 // Since this is a binary compatibility issue, need to consult with 8429 // runtime folks. Fortunately, this is a *very* obscure construct. 8430 S += '#'; 8431 return; 8432 } 8433 8434 if (OPT->isObjCQualifiedIdType()) { 8435 getObjCEncodingForTypeImpl( 8436 getObjCIdType(), S, 8437 Options.keepingOnly(ObjCEncOptions() 8438 .setExpandPointedToStructures() 8439 .setExpandStructures()), 8440 FD); 8441 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 8442 // Note that we do extended encoding of protocol qualifier list 8443 // Only when doing ivar or property encoding. 
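      // Illustrative: an ivar declared as 'id<NSCopying> obj;' is encoded
      // as @"<NSCopying>" in this mode.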
8444 S += '"'; 8445 for (const auto *I : OPT->quals()) { 8446 S += '<'; 8447 S += I->getObjCRuntimeNameAsString(); 8448 S += '>'; 8449 } 8450 S += '"'; 8451 } 8452 return; 8453 } 8454 8455 S += '@'; 8456 if (OPT->getInterfaceDecl() && 8457 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 8458 S += '"'; 8459 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 8460 for (const auto *I : OPT->quals()) { 8461 S += '<'; 8462 S += I->getObjCRuntimeNameAsString(); 8463 S += '>'; 8464 } 8465 S += '"'; 8466 } 8467 return; 8468 } 8469 8470 // gcc just blithely ignores member pointers. 8471 // FIXME: we should do better than that. 'M' is available. 8472 case Type::MemberPointer: 8473 // This matches gcc's encoding, even though technically it is insufficient. 8474 //FIXME. We should do a better job than gcc. 8475 case Type::Vector: 8476 case Type::ExtVector: 8477 // Until we have a coherent encoding of these three types, issue warning. 8478 if (NotEncodedT) 8479 *NotEncodedT = T; 8480 return; 8481 8482 case Type::ConstantMatrix: 8483 if (NotEncodedT) 8484 *NotEncodedT = T; 8485 return; 8486 8487 case Type::BitInt: 8488 if (NotEncodedT) 8489 *NotEncodedT = T; 8490 return; 8491 8492 // We could see an undeduced auto type here during error recovery. 8493 // Just ignore it. 8494 case Type::Auto: 8495 case Type::DeducedTemplateSpecialization: 8496 return; 8497 8498 case Type::Pipe: 8499 #define ABSTRACT_TYPE(KIND, BASE) 8500 #define TYPE(KIND, BASE) 8501 #define DEPENDENT_TYPE(KIND, BASE) \ 8502 case Type::KIND: 8503 #define NON_CANONICAL_TYPE(KIND, BASE) \ 8504 case Type::KIND: 8505 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 8506 case Type::KIND: 8507 #include "clang/AST/TypeNodes.inc" 8508 llvm_unreachable("@encode for dependent type!"); 8509 } 8510 llvm_unreachable("bad type kind!"); 8511 } 8512 8513 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 8514 std::string &S, 8515 const FieldDecl *FD, 8516 bool includeVBases, 8517 QualType *NotEncodedT) const { 8518 assert(RDecl && "Expected non-null RecordDecl"); 8519 assert(!RDecl->isUnion() && "Should not be called for unions"); 8520 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 8521 return; 8522 8523 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 8524 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 8525 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 8526 8527 if (CXXRec) { 8528 for (const auto &BI : CXXRec->bases()) { 8529 if (!BI.isVirtual()) { 8530 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8531 if (base->isEmpty()) 8532 continue; 8533 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 8534 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8535 std::make_pair(offs, base)); 8536 } 8537 } 8538 } 8539 8540 for (FieldDecl *Field : RDecl->fields()) { 8541 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 8542 continue; 8543 uint64_t offs = layout.getFieldOffset(Field->getFieldIndex()); 8544 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8545 std::make_pair(offs, Field)); 8546 } 8547 8548 if (CXXRec && includeVBases) { 8549 for (const auto &BI : CXXRec->vbases()) { 8550 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 8551 if (base->isEmpty()) 8552 continue; 8553 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 8554 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 8555 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 8556 
FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 8557 std::make_pair(offs, base)); 8558 } 8559 } 8560 8561 CharUnits size; 8562 if (CXXRec) { 8563 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 8564 } else { 8565 size = layout.getSize(); 8566 } 8567 8568 #ifndef NDEBUG 8569 uint64_t CurOffs = 0; 8570 #endif 8571 std::multimap<uint64_t, NamedDecl *>::iterator 8572 CurLayObj = FieldOrBaseOffsets.begin(); 8573 8574 if (CXXRec && CXXRec->isDynamicClass() && 8575 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8576 if (FD) { 8577 S += "\"_vptr$"; 8578 std::string recname = CXXRec->getNameAsString(); 8579 if (recname.empty()) recname = "?"; 8580 S += recname; 8581 S += '"'; 8582 } 8583 S += "^^?"; 8584 #ifndef NDEBUG 8585 CurOffs += getTypeSize(VoidPtrTy); 8586 #endif 8587 } 8588 8589 if (!RDecl->hasFlexibleArrayMember()) { 8590 // Mark the end of the structure. 8591 uint64_t offs = toBits(size); 8592 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8593 std::make_pair(offs, nullptr)); 8594 } 8595 8596 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8597 #ifndef NDEBUG 8598 assert(CurOffs <= CurLayObj->first); 8599 if (CurOffs < CurLayObj->first) { 8600 uint64_t padding = CurLayObj->first - CurOffs; 8601 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8602 // packing/alignment of members is different that normal, in which case 8603 // the encoding will be out-of-sync with the real layout. 8604 // If the runtime switches to just consider the size of types without 8605 // taking into account alignment, we could make padding explicit in the 8606 // encoding (e.g. using arrays of chars). The encoding strings would be 8607 // longer then though. 8608 CurOffs += padding; 8609 } 8610 #endif 8611 8612 NamedDecl *dcl = CurLayObj->second; 8613 if (!dcl) 8614 break; // reached end of structure. 8615 8616 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8617 // We expand the bases without their virtual bases since those are going 8618 // in the initial structure. Note that this differs from gcc which 8619 // expands virtual bases each time one is encountered in the hierarchy, 8620 // making the encoding type bigger than it really is. 
8621 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8622 NotEncodedT); 8623 assert(!base->isEmpty()); 8624 #ifndef NDEBUG 8625 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8626 #endif 8627 } else { 8628 const auto *field = cast<FieldDecl>(dcl); 8629 if (FD) { 8630 S += '"'; 8631 S += field->getNameAsString(); 8632 S += '"'; 8633 } 8634 8635 if (field->isBitField()) { 8636 EncodeBitField(this, S, field->getType(), field); 8637 #ifndef NDEBUG 8638 CurOffs += field->getBitWidthValue(*this); 8639 #endif 8640 } else { 8641 QualType qt = field->getType(); 8642 getLegacyIntegralTypeEncoding(qt); 8643 getObjCEncodingForTypeImpl( 8644 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8645 FD, NotEncodedT); 8646 #ifndef NDEBUG 8647 CurOffs += getTypeSize(field->getType()); 8648 #endif 8649 } 8650 } 8651 } 8652 } 8653 8654 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8655 std::string& S) const { 8656 if (QT & Decl::OBJC_TQ_In) 8657 S += 'n'; 8658 if (QT & Decl::OBJC_TQ_Inout) 8659 S += 'N'; 8660 if (QT & Decl::OBJC_TQ_Out) 8661 S += 'o'; 8662 if (QT & Decl::OBJC_TQ_Bycopy) 8663 S += 'O'; 8664 if (QT & Decl::OBJC_TQ_Byref) 8665 S += 'R'; 8666 if (QT & Decl::OBJC_TQ_Oneway) 8667 S += 'V'; 8668 } 8669 8670 TypedefDecl *ASTContext::getObjCIdDecl() const { 8671 if (!ObjCIdDecl) { 8672 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8673 T = getObjCObjectPointerType(T); 8674 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8675 } 8676 return ObjCIdDecl; 8677 } 8678 8679 TypedefDecl *ASTContext::getObjCSelDecl() const { 8680 if (!ObjCSelDecl) { 8681 QualType T = getPointerType(ObjCBuiltinSelTy); 8682 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8683 } 8684 return ObjCSelDecl; 8685 } 8686 8687 TypedefDecl *ASTContext::getObjCClassDecl() const { 8688 if (!ObjCClassDecl) { 8689 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8690 T = getObjCObjectPointerType(T); 8691 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8692 } 8693 return ObjCClassDecl; 8694 } 8695 8696 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8697 if (!ObjCProtocolClassDecl) { 8698 ObjCProtocolClassDecl 8699 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8700 SourceLocation(), 8701 &Idents.get("Protocol"), 8702 /*typeParamList=*/nullptr, 8703 /*PrevDecl=*/nullptr, 8704 SourceLocation(), true); 8705 } 8706 8707 return ObjCProtocolClassDecl; 8708 } 8709 8710 //===----------------------------------------------------------------------===// 8711 // __builtin_va_list Construction Functions 8712 //===----------------------------------------------------------------------===// 8713 8714 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8715 StringRef Name) { 8716 // typedef char* __builtin[_ms]_va_list; 8717 QualType T = Context->getPointerType(Context->CharTy); 8718 return Context->buildImplicitTypedef(T, Name); 8719 } 8720 8721 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8722 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8723 } 8724 8725 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8726 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8727 } 8728 8729 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8730 // typedef void* __builtin_va_list; 8731 QualType T = Context->getPointerType(Context->VoidTy); 8732 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8733 } 8734 8735 static TypedefDecl * 8736 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8737 // struct __va_list 8738 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8739 if (Context->getLangOpts().CPlusPlus) { 8740 // namespace std { struct __va_list { 8741 auto *NS = NamespaceDecl::Create( 8742 const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(), 8743 /*Inline=*/false, SourceLocation(), SourceLocation(), 8744 &Context->Idents.get("std"), 8745 /*PrevDecl=*/nullptr, /*Nested=*/false); 8746 NS->setImplicit(); 8747 VaListTagDecl->setDeclContext(NS); 8748 } 8749 8750 VaListTagDecl->startDefinition(); 8751 8752 const size_t NumFields = 5; 8753 QualType FieldTypes[NumFields]; 8754 const char *FieldNames[NumFields]; 8755 8756 // void *__stack; 8757 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8758 FieldNames[0] = "__stack"; 8759 8760 // void *__gr_top; 8761 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8762 FieldNames[1] = "__gr_top"; 8763 8764 // void *__vr_top; 8765 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8766 FieldNames[2] = "__vr_top"; 8767 8768 // int __gr_offs; 8769 FieldTypes[3] = Context->IntTy; 8770 FieldNames[3] = "__gr_offs"; 8771 8772 // int __vr_offs; 8773 FieldTypes[4] = Context->IntTy; 8774 FieldNames[4] = "__vr_offs"; 8775 8776 // Create fields 8777 for (unsigned i = 0; i < NumFields; ++i) { 8778 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8779 VaListTagDecl, 8780 SourceLocation(), 8781 SourceLocation(), 8782 &Context->Idents.get(FieldNames[i]), 8783 FieldTypes[i], /*TInfo=*/nullptr, 8784 /*BitWidth=*/nullptr, 8785 /*Mutable=*/false, 8786 ICIS_NoInit); 8787 Field->setAccess(AS_public); 8788 VaListTagDecl->addDecl(Field); 8789 } 8790 VaListTagDecl->completeDefinition(); 8791 Context->VaListTagDecl = VaListTagDecl; 8792 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8793 8794 // } __builtin_va_list; 8795 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8796 } 8797 8798 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8799 // typedef struct __va_list_tag { 8800 RecordDecl *VaListTagDecl; 8801 8802 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8803 VaListTagDecl->startDefinition(); 8804 8805 const size_t NumFields = 5; 8806 QualType FieldTypes[NumFields]; 8807 const char *FieldNames[NumFields]; 8808 8809 // unsigned char gpr; 8810 FieldTypes[0] = Context->UnsignedCharTy; 8811 FieldNames[0] = "gpr"; 8812 8813 // unsigned char fpr; 8814 FieldTypes[1] = Context->UnsignedCharTy; 8815 FieldNames[1] = "fpr"; 8816 8817 // unsigned short reserved; 8818 FieldTypes[2] = Context->UnsignedShortTy; 8819 FieldNames[2] = "reserved"; 8820 8821 // void* overflow_arg_area; 8822 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8823 FieldNames[3] = "overflow_arg_area"; 8824 8825 // void* reg_save_area; 8826 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8827 FieldNames[4] = "reg_save_area"; 8828 8829 // Create fields 8830 for (unsigned i = 0; i < NumFields; ++i) { 8831 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8832 SourceLocation(), 8833 SourceLocation(), 8834 &Context->Idents.get(FieldNames[i]), 8835 FieldTypes[i], /*TInfo=*/nullptr, 8836 /*BitWidth=*/nullptr, 8837 /*Mutable=*/false, 8838 ICIS_NoInit); 8839 Field->setAccess(AS_public); 8840 VaListTagDecl->addDecl(Field); 8841 } 8842 VaListTagDecl->completeDefinition(); 8843 
Context->VaListTagDecl = VaListTagDecl; 8844 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8845 8846 // } __va_list_tag; 8847 TypedefDecl *VaListTagTypedefDecl = 8848 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8849 8850 QualType VaListTagTypedefType = 8851 Context->getTypedefType(VaListTagTypedefDecl); 8852 8853 // typedef __va_list_tag __builtin_va_list[1]; 8854 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8855 QualType VaListTagArrayType = Context->getConstantArrayType( 8856 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 8857 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8858 } 8859 8860 static TypedefDecl * 8861 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8862 // struct __va_list_tag { 8863 RecordDecl *VaListTagDecl; 8864 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8865 VaListTagDecl->startDefinition(); 8866 8867 const size_t NumFields = 4; 8868 QualType FieldTypes[NumFields]; 8869 const char *FieldNames[NumFields]; 8870 8871 // unsigned gp_offset; 8872 FieldTypes[0] = Context->UnsignedIntTy; 8873 FieldNames[0] = "gp_offset"; 8874 8875 // unsigned fp_offset; 8876 FieldTypes[1] = Context->UnsignedIntTy; 8877 FieldNames[1] = "fp_offset"; 8878 8879 // void* overflow_arg_area; 8880 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8881 FieldNames[2] = "overflow_arg_area"; 8882 8883 // void* reg_save_area; 8884 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8885 FieldNames[3] = "reg_save_area"; 8886 8887 // Create fields 8888 for (unsigned i = 0; i < NumFields; ++i) { 8889 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8890 VaListTagDecl, 8891 SourceLocation(), 8892 SourceLocation(), 8893 &Context->Idents.get(FieldNames[i]), 8894 FieldTypes[i], /*TInfo=*/nullptr, 8895 /*BitWidth=*/nullptr, 8896 /*Mutable=*/false, 8897 ICIS_NoInit); 8898 Field->setAccess(AS_public); 8899 VaListTagDecl->addDecl(Field); 8900 } 8901 VaListTagDecl->completeDefinition(); 8902 Context->VaListTagDecl = VaListTagDecl; 8903 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8904 8905 // }; 8906 8907 // typedef struct __va_list_tag __builtin_va_list[1]; 8908 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8909 QualType VaListTagArrayType = Context->getConstantArrayType( 8910 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 8911 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8912 } 8913 8914 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8915 // typedef int __builtin_va_list[4]; 8916 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8917 QualType IntArrayType = Context->getConstantArrayType( 8918 Context->IntTy, Size, nullptr, ArraySizeModifier::Normal, 0); 8919 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8920 } 8921 8922 static TypedefDecl * 8923 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8924 // struct __va_list 8925 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8926 if (Context->getLangOpts().CPlusPlus) { 8927 // namespace std { struct __va_list { 8928 NamespaceDecl *NS; 8929 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8930 Context->getTranslationUnitDecl(), 8931 /*Inline=*/false, SourceLocation(), 8932 SourceLocation(), &Context->Idents.get("std"), 8933 /*PrevDecl=*/nullptr, /*Nested=*/false); 8934 
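    // Placing __va_list in namespace std follows the ARM C++ ABI, which
    // specifies va_list as std::__va_list (mangled as St9__va_list).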
NS->setImplicit(); 8935 VaListDecl->setDeclContext(NS); 8936 } 8937 8938 VaListDecl->startDefinition(); 8939 8940 // void * __ap; 8941 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8942 VaListDecl, 8943 SourceLocation(), 8944 SourceLocation(), 8945 &Context->Idents.get("__ap"), 8946 Context->getPointerType(Context->VoidTy), 8947 /*TInfo=*/nullptr, 8948 /*BitWidth=*/nullptr, 8949 /*Mutable=*/false, 8950 ICIS_NoInit); 8951 Field->setAccess(AS_public); 8952 VaListDecl->addDecl(Field); 8953 8954 // }; 8955 VaListDecl->completeDefinition(); 8956 Context->VaListTagDecl = VaListDecl; 8957 8958 // typedef struct __va_list __builtin_va_list; 8959 QualType T = Context->getRecordType(VaListDecl); 8960 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8961 } 8962 8963 static TypedefDecl * 8964 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8965 // struct __va_list_tag { 8966 RecordDecl *VaListTagDecl; 8967 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8968 VaListTagDecl->startDefinition(); 8969 8970 const size_t NumFields = 4; 8971 QualType FieldTypes[NumFields]; 8972 const char *FieldNames[NumFields]; 8973 8974 // long __gpr; 8975 FieldTypes[0] = Context->LongTy; 8976 FieldNames[0] = "__gpr"; 8977 8978 // long __fpr; 8979 FieldTypes[1] = Context->LongTy; 8980 FieldNames[1] = "__fpr"; 8981 8982 // void *__overflow_arg_area; 8983 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8984 FieldNames[2] = "__overflow_arg_area"; 8985 8986 // void *__reg_save_area; 8987 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8988 FieldNames[3] = "__reg_save_area"; 8989 8990 // Create fields 8991 for (unsigned i = 0; i < NumFields; ++i) { 8992 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8993 VaListTagDecl, 8994 SourceLocation(), 8995 SourceLocation(), 8996 &Context->Idents.get(FieldNames[i]), 8997 FieldTypes[i], /*TInfo=*/nullptr, 8998 /*BitWidth=*/nullptr, 8999 /*Mutable=*/false, 9000 ICIS_NoInit); 9001 Field->setAccess(AS_public); 9002 VaListTagDecl->addDecl(Field); 9003 } 9004 VaListTagDecl->completeDefinition(); 9005 Context->VaListTagDecl = VaListTagDecl; 9006 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9007 9008 // }; 9009 9010 // typedef __va_list_tag __builtin_va_list[1]; 9011 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9012 QualType VaListTagArrayType = Context->getConstantArrayType( 9013 VaListTagType, Size, nullptr, ArraySizeModifier::Normal, 0); 9014 9015 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9016 } 9017 9018 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 9019 // typedef struct __va_list_tag { 9020 RecordDecl *VaListTagDecl; 9021 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 9022 VaListTagDecl->startDefinition(); 9023 9024 const size_t NumFields = 3; 9025 QualType FieldTypes[NumFields]; 9026 const char *FieldNames[NumFields]; 9027 9028 // void *CurrentSavedRegisterArea; 9029 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 9030 FieldNames[0] = "__current_saved_reg_area_pointer"; 9031 9032 // void *SavedRegAreaEnd; 9033 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 9034 FieldNames[1] = "__saved_reg_area_end_pointer"; 9035 9036 // void *OverflowArea; 9037 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 9038 FieldNames[2] = "__overflow_area_pointer"; 9039 9040 // Create fields 9041 for (unsigned i = 0; i < NumFields; ++i) { 9042 
FieldDecl *Field = FieldDecl::Create( 9043 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 9044 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 9045 /*TInfo=*/nullptr, 9046 /*BitWidth=*/nullptr, 9047 /*Mutable=*/false, ICIS_NoInit); 9048 Field->setAccess(AS_public); 9049 VaListTagDecl->addDecl(Field); 9050 } 9051 VaListTagDecl->completeDefinition(); 9052 Context->VaListTagDecl = VaListTagDecl; 9053 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 9054 9055 // } __va_list_tag; 9056 TypedefDecl *VaListTagTypedefDecl = 9057 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 9058 9059 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 9060 9061 // typedef __va_list_tag __builtin_va_list[1]; 9062 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 9063 QualType VaListTagArrayType = Context->getConstantArrayType( 9064 VaListTagTypedefType, Size, nullptr, ArraySizeModifier::Normal, 0); 9065 9066 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 9067 } 9068 9069 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 9070 TargetInfo::BuiltinVaListKind Kind) { 9071 switch (Kind) { 9072 case TargetInfo::CharPtrBuiltinVaList: 9073 return CreateCharPtrBuiltinVaListDecl(Context); 9074 case TargetInfo::VoidPtrBuiltinVaList: 9075 return CreateVoidPtrBuiltinVaListDecl(Context); 9076 case TargetInfo::AArch64ABIBuiltinVaList: 9077 return CreateAArch64ABIBuiltinVaListDecl(Context); 9078 case TargetInfo::PowerABIBuiltinVaList: 9079 return CreatePowerABIBuiltinVaListDecl(Context); 9080 case TargetInfo::X86_64ABIBuiltinVaList: 9081 return CreateX86_64ABIBuiltinVaListDecl(Context); 9082 case TargetInfo::PNaClABIBuiltinVaList: 9083 return CreatePNaClABIBuiltinVaListDecl(Context); 9084 case TargetInfo::AAPCSABIBuiltinVaList: 9085 return CreateAAPCSABIBuiltinVaListDecl(Context); 9086 case TargetInfo::SystemZBuiltinVaList: 9087 return CreateSystemZBuiltinVaListDecl(Context); 9088 case TargetInfo::HexagonBuiltinVaList: 9089 return CreateHexagonBuiltinVaListDecl(Context); 9090 } 9091 9092 llvm_unreachable("Unhandled __builtin_va_list type kind"); 9093 } 9094 9095 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 9096 if (!BuiltinVaListDecl) { 9097 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 9098 assert(BuiltinVaListDecl->isImplicit()); 9099 } 9100 9101 return BuiltinVaListDecl; 9102 } 9103 9104 Decl *ASTContext::getVaListTagDecl() const { 9105 // Force the creation of VaListTagDecl by building the __builtin_va_list 9106 // declaration. 9107 if (!VaListTagDecl) 9108 (void)getBuiltinVaListDecl(); 9109 9110 return VaListTagDecl; 9111 } 9112 9113 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 9114 if (!BuiltinMSVaListDecl) 9115 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 9116 9117 return BuiltinMSVaListDecl; 9118 } 9119 9120 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 9121 // Allow redecl custom type checking builtin for HLSL. 
9122 if (LangOpts.HLSL && FD->getBuiltinID() != Builtin::NotBuiltin && 9123 BuiltinInfo.hasCustomTypechecking(FD->getBuiltinID())) 9124 return true; 9125 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 9126 } 9127 9128 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 9129 assert(ObjCConstantStringType.isNull() && 9130 "'NSConstantString' type already set!"); 9131 9132 ObjCConstantStringType = getObjCInterfaceType(Decl); 9133 } 9134 9135 /// Retrieve the template name that corresponds to a non-empty 9136 /// lookup. 9137 TemplateName 9138 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 9139 UnresolvedSetIterator End) const { 9140 unsigned size = End - Begin; 9141 assert(size > 1 && "set is not overloaded!"); 9142 9143 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 9144 size * sizeof(FunctionTemplateDecl*)); 9145 auto *OT = new (memory) OverloadedTemplateStorage(size); 9146 9147 NamedDecl **Storage = OT->getStorage(); 9148 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 9149 NamedDecl *D = *I; 9150 assert(isa<FunctionTemplateDecl>(D) || 9151 isa<UnresolvedUsingValueDecl>(D) || 9152 (isa<UsingShadowDecl>(D) && 9153 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 9154 *Storage++ = D; 9155 } 9156 9157 return TemplateName(OT); 9158 } 9159 9160 /// Retrieve a template name representing an unqualified-id that has been 9161 /// assumed to name a template for ADL purposes. 9162 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 9163 auto *OT = new (*this) AssumedTemplateStorage(Name); 9164 return TemplateName(OT); 9165 } 9166 9167 /// Retrieve the template name that represents a qualified 9168 /// template name such as \c std::vector. 9169 TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 9170 bool TemplateKeyword, 9171 TemplateName Template) const { 9172 assert(NNS && "Missing nested-name-specifier in qualified template name"); 9173 9174 // FIXME: Canonicalization? 9175 llvm::FoldingSetNodeID ID; 9176 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 9177 9178 void *InsertPos = nullptr; 9179 QualifiedTemplateName *QTN = 9180 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9181 if (!QTN) { 9182 QTN = new (*this, alignof(QualifiedTemplateName)) 9183 QualifiedTemplateName(NNS, TemplateKeyword, Template); 9184 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 9185 } 9186 9187 return TemplateName(QTN); 9188 } 9189 9190 /// Retrieve the template name that represents a dependent 9191 /// template name such as \c MetaFun::template apply. 
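/// For instance (illustrative):
/// @code
///   template <typename MetaFun, typename T>
///   struct Bind {
///     using type = typename MetaFun::template apply<T>::type;
///   };
/// @endcode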
9192 TemplateName 9193 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9194 const IdentifierInfo *Name) const { 9195 assert((!NNS || NNS->isDependent()) && 9196 "Nested name specifier must be dependent"); 9197 9198 llvm::FoldingSetNodeID ID; 9199 DependentTemplateName::Profile(ID, NNS, Name); 9200 9201 void *InsertPos = nullptr; 9202 DependentTemplateName *QTN = 9203 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9204 9205 if (QTN) 9206 return TemplateName(QTN); 9207 9208 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9209 if (CanonNNS == NNS) { 9210 QTN = new (*this, alignof(DependentTemplateName)) 9211 DependentTemplateName(NNS, Name); 9212 } else { 9213 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 9214 QTN = new (*this, alignof(DependentTemplateName)) 9215 DependentTemplateName(NNS, Name, Canon); 9216 DependentTemplateName *CheckQTN = 9217 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9218 assert(!CheckQTN && "Dependent type name canonicalization broken"); 9219 (void)CheckQTN; 9220 } 9221 9222 DependentTemplateNames.InsertNode(QTN, InsertPos); 9223 return TemplateName(QTN); 9224 } 9225 9226 /// Retrieve the template name that represents a dependent 9227 /// template name such as \c MetaFun::template operator+. 9228 TemplateName 9229 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 9230 OverloadedOperatorKind Operator) const { 9231 assert((!NNS || NNS->isDependent()) && 9232 "Nested name specifier must be dependent"); 9233 9234 llvm::FoldingSetNodeID ID; 9235 DependentTemplateName::Profile(ID, NNS, Operator); 9236 9237 void *InsertPos = nullptr; 9238 DependentTemplateName *QTN 9239 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9240 9241 if (QTN) 9242 return TemplateName(QTN); 9243 9244 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 9245 if (CanonNNS == NNS) { 9246 QTN = new (*this, alignof(DependentTemplateName)) 9247 DependentTemplateName(NNS, Operator); 9248 } else { 9249 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 9250 QTN = new (*this, alignof(DependentTemplateName)) 9251 DependentTemplateName(NNS, Operator, Canon); 9252 9253 DependentTemplateName *CheckQTN 9254 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 9255 assert(!CheckQTN && "Dependent template name canonicalization broken"); 9256 (void)CheckQTN; 9257 } 9258 9259 DependentTemplateNames.InsertNode(QTN, InsertPos); 9260 return TemplateName(QTN); 9261 } 9262 9263 TemplateName ASTContext::getSubstTemplateTemplateParm( 9264 TemplateName Replacement, Decl *AssociatedDecl, unsigned Index, 9265 std::optional<unsigned> PackIndex) const { 9266 llvm::FoldingSetNodeID ID; 9267 SubstTemplateTemplateParmStorage::Profile(ID, Replacement, AssociatedDecl, 9268 Index, PackIndex); 9269 9270 void *insertPos = nullptr; 9271 SubstTemplateTemplateParmStorage *subst 9272 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 9273 9274 if (!subst) { 9275 subst = new (*this) SubstTemplateTemplateParmStorage( 9276 Replacement, AssociatedDecl, Index, PackIndex); 9277 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 9278 } 9279 9280 return TemplateName(subst); 9281 } 9282 9283 TemplateName 9284 ASTContext::getSubstTemplateTemplateParmPack(const TemplateArgument &ArgPack, 9285 Decl *AssociatedDecl, 9286 unsigned Index, bool Final) const { 9287 auto &Self = const_cast<ASTContext &>(*this); 9288 llvm::FoldingSetNodeID ID; 9289 
SubstTemplateTemplateParmPackStorage::Profile(ID, Self, ArgPack, 9290 AssociatedDecl, Index, Final); 9291 9292 void *InsertPos = nullptr; 9293 SubstTemplateTemplateParmPackStorage *Subst 9294 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 9295 9296 if (!Subst) { 9297 Subst = new (*this) SubstTemplateTemplateParmPackStorage( 9298 ArgPack.pack_elements(), AssociatedDecl, Index, Final); 9299 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 9300 } 9301 9302 return TemplateName(Subst); 9303 } 9304 9305 /// getFromTargetType - Given one of the integer types provided by 9306 /// TargetInfo, produce the corresponding type. The unsigned @p Type 9307 /// is actually a value of type @c TargetInfo::IntType. 9308 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 9309 switch (Type) { 9310 case TargetInfo::NoInt: return {}; 9311 case TargetInfo::SignedChar: return SignedCharTy; 9312 case TargetInfo::UnsignedChar: return UnsignedCharTy; 9313 case TargetInfo::SignedShort: return ShortTy; 9314 case TargetInfo::UnsignedShort: return UnsignedShortTy; 9315 case TargetInfo::SignedInt: return IntTy; 9316 case TargetInfo::UnsignedInt: return UnsignedIntTy; 9317 case TargetInfo::SignedLong: return LongTy; 9318 case TargetInfo::UnsignedLong: return UnsignedLongTy; 9319 case TargetInfo::SignedLongLong: return LongLongTy; 9320 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 9321 } 9322 9323 llvm_unreachable("Unhandled TargetInfo::IntType value"); 9324 } 9325 9326 //===----------------------------------------------------------------------===// 9327 // Type Predicates. 9328 //===----------------------------------------------------------------------===// 9329 9330 /// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's 9331 /// garbage collection attribute. 9332 /// 9333 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 9334 if (getLangOpts().getGC() == LangOptions::NonGC) 9335 return Qualifiers::GCNone; 9336 9337 assert(getLangOpts().ObjC); 9338 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 9339 9340 // Default behaviour under objective-C's gc is for ObjC pointers 9341 // (or pointers to them) be treated as though they were declared 9342 // as __strong. 9343 if (GCAttrs == Qualifiers::GCNone) { 9344 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 9345 return Qualifiers::Strong; 9346 else if (Ty->isPointerType()) 9347 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 9348 } else { 9349 // It's not valid to set GC attributes on anything that isn't a 9350 // pointer. 9351 #ifndef NDEBUG 9352 QualType CT = Ty->getCanonicalTypeInternal(); 9353 while (const auto *AT = dyn_cast<ArrayType>(CT)) 9354 CT = AT->getElementType(); 9355 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 9356 #endif 9357 } 9358 return GCAttrs; 9359 } 9360 9361 //===----------------------------------------------------------------------===// 9362 // Type Compatibility Testing 9363 //===----------------------------------------------------------------------===// 9364 9365 /// areCompatVectorTypes - Return true if the two specified vector types are 9366 /// compatible. 
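/// For example (illustrative only, hypothetical typedef names), two
/// separately declared GCC vectors with the same element type and element
/// count are compatible here:
/// \code
///   typedef float float4a __attribute__((vector_size(16)));
///   typedef float float4b __attribute__((vector_size(16)));
/// \endcode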
9367 static bool areCompatVectorTypes(const VectorType *LHS, 9368 const VectorType *RHS) { 9369 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9370 return LHS->getElementType() == RHS->getElementType() && 9371 LHS->getNumElements() == RHS->getNumElements(); 9372 } 9373 9374 /// areCompatMatrixTypes - Return true if the two specified matrix types are 9375 /// compatible. 9376 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 9377 const ConstantMatrixType *RHS) { 9378 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 9379 return LHS->getElementType() == RHS->getElementType() && 9380 LHS->getNumRows() == RHS->getNumRows() && 9381 LHS->getNumColumns() == RHS->getNumColumns(); 9382 } 9383 9384 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 9385 QualType SecondVec) { 9386 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 9387 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 9388 9389 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 9390 return true; 9391 9392 // Treat Neon vector types and most AltiVec vector types as if they are the 9393 // equivalent GCC vector types. 9394 const auto *First = FirstVec->castAs<VectorType>(); 9395 const auto *Second = SecondVec->castAs<VectorType>(); 9396 if (First->getNumElements() == Second->getNumElements() && 9397 hasSameType(First->getElementType(), Second->getElementType()) && 9398 First->getVectorKind() != VectorKind::AltiVecPixel && 9399 First->getVectorKind() != VectorKind::AltiVecBool && 9400 Second->getVectorKind() != VectorKind::AltiVecPixel && 9401 Second->getVectorKind() != VectorKind::AltiVecBool && 9402 First->getVectorKind() != VectorKind::SveFixedLengthData && 9403 First->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9404 Second->getVectorKind() != VectorKind::SveFixedLengthData && 9405 Second->getVectorKind() != VectorKind::SveFixedLengthPredicate && 9406 First->getVectorKind() != VectorKind::RVVFixedLengthData && 9407 Second->getVectorKind() != VectorKind::RVVFixedLengthData) 9408 return true; 9409 9410 return false; 9411 } 9412 9413 /// getSVETypeSize - Return SVE vector or predicate register size. 9414 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 9415 assert(Ty->isSveVLSBuiltinType() && "Invalid SVE Type"); 9416 if (Ty->getKind() == BuiltinType::SveBool || 9417 Ty->getKind() == BuiltinType::SveCount) 9418 return (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth(); 9419 return Context.getLangOpts().VScaleMin * 128; 9420 } 9421 9422 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 9423 QualType SecondType) { 9424 assert( 9425 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9426 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9427 "Expected SVE builtin type and vector type!"); 9428 9429 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9430 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9431 if (const auto *VT = SecondType->getAs<VectorType>()) { 9432 // Predicates have the same representation as uint8 so we also have to 9433 // check the kind to make these types incompatible. 
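// For example (illustrative only): a sizeless svbool_t and a fixed-length
// svuint8_t both use 8-bit element storage, so comparing element types alone
// would not distinguish them; the vector kind check below does.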
9434 if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) 9435 return BT->getKind() == BuiltinType::SveBool; 9436 else if (VT->getVectorKind() == VectorKind::SveFixedLengthData) 9437 return VT->getElementType().getCanonicalType() == 9438 FirstType->getSveEltType(*this); 9439 else if (VT->getVectorKind() == VectorKind::Generic) 9440 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 9441 hasSameType(VT->getElementType(), 9442 getBuiltinVectorTypeInfo(BT).ElementType); 9443 } 9444 } 9445 return false; 9446 }; 9447 9448 return IsValidCast(FirstType, SecondType) || 9449 IsValidCast(SecondType, FirstType); 9450 } 9451 9452 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 9453 QualType SecondType) { 9454 assert( 9455 ((FirstType->isSVESizelessBuiltinType() && SecondType->isVectorType()) || 9456 (FirstType->isVectorType() && SecondType->isSVESizelessBuiltinType())) && 9457 "Expected SVE builtin type and vector type!"); 9458 9459 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9460 const auto *BT = FirstType->getAs<BuiltinType>(); 9461 if (!BT) 9462 return false; 9463 9464 const auto *VecTy = SecondType->getAs<VectorType>(); 9465 if (VecTy && (VecTy->getVectorKind() == VectorKind::SveFixedLengthData || 9466 VecTy->getVectorKind() == VectorKind::Generic)) { 9467 const LangOptions::LaxVectorConversionKind LVCKind = 9468 getLangOpts().getLaxVectorConversions(); 9469 9470 // Can not convert between sve predicates and sve vectors because of 9471 // different size. 9472 if (BT->getKind() == BuiltinType::SveBool && 9473 VecTy->getVectorKind() == VectorKind::SveFixedLengthData) 9474 return false; 9475 9476 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 9477 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 9478 // converts to VLAT and VLAT implicitly converts to GNUT." 9479 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 9480 // predicates. 9481 if (VecTy->getVectorKind() == VectorKind::Generic && 9482 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 9483 return false; 9484 9485 // If -flax-vector-conversions=all is specified, the types are 9486 // certainly compatible. 9487 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9488 return true; 9489 9490 // If -flax-vector-conversions=integer is specified, the types are 9491 // compatible if the elements are integer types. 9492 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9493 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9494 FirstType->getSveEltType(*this)->isIntegerType(); 9495 } 9496 9497 return false; 9498 }; 9499 9500 return IsLaxCompatible(FirstType, SecondType) || 9501 IsLaxCompatible(SecondType, FirstType); 9502 } 9503 9504 /// getRVVTypeSize - Return RVV vector register size. 
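/// (Illustrative arithmetic only: with a fixed vscale of 4 and a builtin type
/// whose minimum shape is 2 elements of 32 bits each, the result is
/// 4 * 2 * 32 = 256 bits. A return value of 0 means no vscale range is known.)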
9505 static uint64_t getRVVTypeSize(ASTContext &Context, const BuiltinType *Ty) { 9506 assert(Ty->isRVVVLSBuiltinType() && "Invalid RVV Type"); 9507 auto VScale = Context.getTargetInfo().getVScaleRange(Context.getLangOpts()); 9508 if (!VScale) 9509 return 0; 9510 9511 ASTContext::BuiltinVectorTypeInfo Info = Context.getBuiltinVectorTypeInfo(Ty); 9512 9513 uint64_t EltSize = Context.getTypeSize(Info.ElementType); 9514 uint64_t MinElts = Info.EC.getKnownMinValue(); 9515 return VScale->first * MinElts * EltSize; 9516 } 9517 9518 bool ASTContext::areCompatibleRVVTypes(QualType FirstType, 9519 QualType SecondType) { 9520 assert( 9521 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9522 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9523 "Expected RVV builtin type and vector type!"); 9524 9525 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 9526 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 9527 if (const auto *VT = SecondType->getAs<VectorType>()) { 9528 if (VT->getVectorKind() == VectorKind::RVVFixedLengthData || 9529 VT->getVectorKind() == VectorKind::Generic) 9530 return FirstType->isRVVVLSBuiltinType() && 9531 getTypeSize(SecondType) == getRVVTypeSize(*this, BT) && 9532 hasSameType(VT->getElementType(), 9533 getBuiltinVectorTypeInfo(BT).ElementType); 9534 } 9535 } 9536 return false; 9537 }; 9538 9539 return IsValidCast(FirstType, SecondType) || 9540 IsValidCast(SecondType, FirstType); 9541 } 9542 9543 bool ASTContext::areLaxCompatibleRVVTypes(QualType FirstType, 9544 QualType SecondType) { 9545 assert( 9546 ((FirstType->isRVVSizelessBuiltinType() && SecondType->isVectorType()) || 9547 (FirstType->isVectorType() && SecondType->isRVVSizelessBuiltinType())) && 9548 "Expected RVV builtin type and vector type!"); 9549 9550 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 9551 const auto *BT = FirstType->getAs<BuiltinType>(); 9552 if (!BT) 9553 return false; 9554 9555 if (!BT->isRVVVLSBuiltinType()) 9556 return false; 9557 9558 const auto *VecTy = SecondType->getAs<VectorType>(); 9559 if (VecTy && VecTy->getVectorKind() == VectorKind::Generic) { 9560 const LangOptions::LaxVectorConversionKind LVCKind = 9561 getLangOpts().getLaxVectorConversions(); 9562 9563 // If __riscv_v_fixed_vlen != N do not allow vector lax conversion. 9564 if (getTypeSize(SecondType) != getRVVTypeSize(*this, BT)) 9565 return false; 9566 9567 // If -flax-vector-conversions=all is specified, the types are 9568 // certainly compatible. 9569 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 9570 return true; 9571 9572 // If -flax-vector-conversions=integer is specified, the types are 9573 // compatible if the elements are integer types. 9574 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 9575 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 9576 FirstType->getRVVEltType(*this)->isIntegerType(); 9577 } 9578 9579 return false; 9580 }; 9581 9582 return IsLaxCompatible(FirstType, SecondType) || 9583 IsLaxCompatible(SecondType, FirstType); 9584 } 9585 9586 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 9587 while (true) { 9588 // __strong id 9589 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 9590 if (Attr->getAttrKind() == attr::ObjCOwnership) 9591 return true; 9592 9593 Ty = Attr->getModifiedType(); 9594 9595 // X *__strong (...) 
9596 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 9597 Ty = Paren->getInnerType(); 9598 9599 // We do not want to look through typedefs, typeof(expr), 9600 // typeof(type), or any other way that the type is somehow 9601 // abstracted. 9602 } else { 9603 return false; 9604 } 9605 } 9606 } 9607 9608 //===----------------------------------------------------------------------===// 9609 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 9610 //===----------------------------------------------------------------------===// 9611 9612 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 9613 /// inheritance hierarchy of 'rProto'. 9614 bool 9615 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 9616 ObjCProtocolDecl *rProto) const { 9617 if (declaresSameEntity(lProto, rProto)) 9618 return true; 9619 for (auto *PI : rProto->protocols()) 9620 if (ProtocolCompatibleWithProtocol(lProto, PI)) 9621 return true; 9622 return false; 9623 } 9624 9625 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 9626 /// Class<pr1, ...>. 9627 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 9628 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 9629 for (auto *lhsProto : lhs->quals()) { 9630 bool match = false; 9631 for (auto *rhsProto : rhs->quals()) { 9632 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 9633 match = true; 9634 break; 9635 } 9636 } 9637 if (!match) 9638 return false; 9639 } 9640 return true; 9641 } 9642 9643 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 9644 /// ObjCQualifiedIDType. 9645 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 9646 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 9647 bool compare) { 9648 // Allow id<P..> and an 'id' in all cases. 9649 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 9650 return true; 9651 9652 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 9653 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 9654 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 9655 return false; 9656 9657 if (lhs->isObjCQualifiedIdType()) { 9658 if (rhs->qual_empty()) { 9659 // If the RHS is a unqualified interface pointer "NSString*", 9660 // make sure we check the class hierarchy. 9661 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9662 for (auto *I : lhs->quals()) { 9663 // when comparing an id<P> on lhs with a static type on rhs, 9664 // see if static class implements all of id's protocols, directly or 9665 // through its super class and categories. 9666 if (!rhsID->ClassImplementsProtocol(I, true)) 9667 return false; 9668 } 9669 } 9670 // If there are no qualifiers and no interface, we have an 'id'. 9671 return true; 9672 } 9673 // Both the right and left sides have qualifiers. 9674 for (auto *lhsProto : lhs->quals()) { 9675 bool match = false; 9676 9677 // when comparing an id<P> on lhs with a static type on rhs, 9678 // see if static class implements all of id's protocols, directly or 9679 // through its super class and categories. 9680 for (auto *rhsProto : rhs->quals()) { 9681 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9682 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9683 match = true; 9684 break; 9685 } 9686 } 9687 // If the RHS is a qualified interface pointer "NSString<P>*", 9688 // make sure we check the class hierarchy. 
9689 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9690 for (auto *I : lhs->quals()) { 9691 // when comparing an id<P> on lhs with a static type on rhs, 9692 // see if static class implements all of id's protocols, directly or 9693 // through its super class and categories. 9694 if (rhsID->ClassImplementsProtocol(I, true)) { 9695 match = true; 9696 break; 9697 } 9698 } 9699 } 9700 if (!match) 9701 return false; 9702 } 9703 9704 return true; 9705 } 9706 9707 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9708 9709 if (lhs->getInterfaceType()) { 9710 // If both the right and left sides have qualifiers. 9711 for (auto *lhsProto : lhs->quals()) { 9712 bool match = false; 9713 9714 // when comparing an id<P> on rhs with a static type on lhs, 9715 // see if static class implements all of id's protocols, directly or 9716 // through its super class and categories. 9717 // First, lhs protocols in the qualifier list must be found, direct 9718 // or indirect in rhs's qualifier list or it is a mismatch. 9719 for (auto *rhsProto : rhs->quals()) { 9720 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9721 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9722 match = true; 9723 break; 9724 } 9725 } 9726 if (!match) 9727 return false; 9728 } 9729 9730 // Static class's protocols, or its super class or category protocols 9731 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9732 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9733 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9734 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9735 // This is rather dubious but matches gcc's behavior. If lhs has 9736 // no type qualifier and its class has no static protocol(s) 9737 // assume that it is mismatch. 9738 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9739 return false; 9740 for (auto *lhsProto : LHSInheritedProtocols) { 9741 bool match = false; 9742 for (auto *rhsProto : rhs->quals()) { 9743 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9744 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9745 match = true; 9746 break; 9747 } 9748 } 9749 if (!match) 9750 return false; 9751 } 9752 } 9753 return true; 9754 } 9755 return false; 9756 } 9757 9758 /// canAssignObjCInterfaces - Return true if the two interface types are 9759 /// compatible for assignment from RHS to LHS. This handles validation of any 9760 /// protocol qualifiers on the LHS or RHS. 9761 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9762 const ObjCObjectPointerType *RHSOPT) { 9763 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9764 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9765 9766 // If either type represents the built-in 'id' type, return true. 9767 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9768 return true; 9769 9770 // Function object that propagates a successful result or handles 9771 // __kindof types. 9772 auto finish = [&](bool succeeded) -> bool { 9773 if (succeeded) 9774 return true; 9775 9776 if (!RHS->isKindOfType()) 9777 return false; 9778 9779 // Strip off __kindof and protocol qualifiers, then check whether 9780 // we can assign the other way. 9781 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9782 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9783 }; 9784 9785 // Casts from or to id<P> are allowed when the other side has compatible 9786 // protocols. 
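// For example (illustrative only): assigning an 'NSString *' value to an
// 'id<NSCopying>' variable is accepted when NSString adopts NSCopying
// directly or through a superclass or category.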
9787 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9788 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9789 } 9790 9791 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 9792 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9793 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9794 } 9795 9796 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9797 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9798 return true; 9799 } 9800 9801 // If we have 2 user-defined types, fall into that path. 9802 if (LHS->getInterface() && RHS->getInterface()) { 9803 return finish(canAssignObjCInterfaces(LHS, RHS)); 9804 } 9805 9806 return false; 9807 } 9808 9809 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9810 /// for providing type-safety for objective-c pointers used to pass/return 9811 /// arguments in block literals. When passed as arguments, passing 'A*' where 9812 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9813 /// not OK. For the return type, the opposite is not OK. 9814 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9815 const ObjCObjectPointerType *LHSOPT, 9816 const ObjCObjectPointerType *RHSOPT, 9817 bool BlockReturnType) { 9818 9819 // Function object that propagates a successful result or handles 9820 // __kindof types. 9821 auto finish = [&](bool succeeded) -> bool { 9822 if (succeeded) 9823 return true; 9824 9825 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9826 if (!Expected->isKindOfType()) 9827 return false; 9828 9829 // Strip off __kindof and protocol qualifiers, then check whether 9830 // we can assign the other way. 9831 return canAssignObjCInterfacesInBlockPointer( 9832 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9833 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9834 BlockReturnType); 9835 }; 9836 9837 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9838 return true; 9839 9840 if (LHSOPT->isObjCBuiltinType()) { 9841 return finish(RHSOPT->isObjCBuiltinType() || 9842 RHSOPT->isObjCQualifiedIdType()); 9843 } 9844 9845 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9846 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9847 // Use for block parameters previous type checking for compatibility. 9848 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9849 // Or corrected type checking as in non-compat mode. 9850 (!BlockReturnType && 9851 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9852 else 9853 return finish(ObjCQualifiedIdTypesAreCompatible( 9854 (BlockReturnType ? LHSOPT : RHSOPT), 9855 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9856 } 9857 9858 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9859 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9860 if (LHS && RHS) { // We have 2 user-defined types. 9861 if (LHS != RHS) { 9862 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9863 return finish(BlockReturnType); 9864 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9865 return finish(!BlockReturnType); 9866 } 9867 else 9868 return true; 9869 } 9870 return false; 9871 } 9872 9873 /// Comparison routine for Objective-C protocols to be used with 9874 /// llvm::array_pod_sort. 
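/// (Note, for exposition: getIntersectionOfProtocols below sorts the
/// composite protocol list with this comparator so the result has a
/// deterministic, name-ordered form.)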
9875 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9876 ObjCProtocolDecl * const *rhs) { 9877 return (*lhs)->getName().compare((*rhs)->getName()); 9878 } 9879 9880 /// getIntersectionOfProtocols - This routine finds the intersection of set 9881 /// of protocols inherited from two distinct objective-c pointer objects with 9882 /// the given common base. 9883 /// It is used to build composite qualifier list of the composite type of 9884 /// the conditional expression involving two objective-c pointer objects. 9885 static 9886 void getIntersectionOfProtocols(ASTContext &Context, 9887 const ObjCInterfaceDecl *CommonBase, 9888 const ObjCObjectPointerType *LHSOPT, 9889 const ObjCObjectPointerType *RHSOPT, 9890 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9891 9892 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9893 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9894 assert(LHS->getInterface() && "LHS must have an interface base"); 9895 assert(RHS->getInterface() && "RHS must have an interface base"); 9896 9897 // Add all of the protocols for the LHS. 9898 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9899 9900 // Start with the protocol qualifiers. 9901 for (auto *proto : LHS->quals()) { 9902 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9903 } 9904 9905 // Also add the protocols associated with the LHS interface. 9906 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9907 9908 // Add all of the protocols for the RHS. 9909 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9910 9911 // Start with the protocol qualifiers. 9912 for (auto *proto : RHS->quals()) { 9913 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9914 } 9915 9916 // Also add the protocols associated with the RHS interface. 9917 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9918 9919 // Compute the intersection of the collected protocol sets. 9920 for (auto *proto : LHSProtocolSet) { 9921 if (RHSProtocolSet.count(proto)) 9922 IntersectionSet.push_back(proto); 9923 } 9924 9925 // Compute the set of protocols that is implied by either the common type or 9926 // the protocols within the intersection. 9927 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9928 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9929 9930 // Remove any implied protocols from the list of inherited protocols. 9931 if (!ImpliedProtocols.empty()) { 9932 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9933 return ImpliedProtocols.contains(proto); 9934 }); 9935 } 9936 9937 // Sort the remaining protocols by name. 9938 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9939 compareObjCProtocolsByName); 9940 } 9941 9942 /// Determine whether the first type is a subtype of the second. 9943 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9944 QualType rhs) { 9945 // Common case: two object pointers. 9946 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9947 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9948 if (lhsOPT && rhsOPT) 9949 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9950 9951 // Two block pointers. 9952 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9953 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9954 if (lhsBlock && rhsBlock) 9955 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9956 9957 // If either is an unqualified 'id' and the other is a block, it's 9958 // acceptable. 
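// For example (illustrative only): when checking covariant type arguments, a
// block pointer such as 'void (^)(void)' is accepted where plain 'id' is
// expected (and vice versa), since blocks are Objective-C objects.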
9959 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9960 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9961 return true; 9962 9963 return false; 9964 } 9965 9966 // Check that the given Objective-C type argument lists are equivalent. 9967 static bool sameObjCTypeArgs(ASTContext &ctx, 9968 const ObjCInterfaceDecl *iface, 9969 ArrayRef<QualType> lhsArgs, 9970 ArrayRef<QualType> rhsArgs, 9971 bool stripKindOf) { 9972 if (lhsArgs.size() != rhsArgs.size()) 9973 return false; 9974 9975 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9976 if (!typeParams) 9977 return false; 9978 9979 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9980 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9981 continue; 9982 9983 switch (typeParams->begin()[i]->getVariance()) { 9984 case ObjCTypeParamVariance::Invariant: 9985 if (!stripKindOf || 9986 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9987 rhsArgs[i].stripObjCKindOfType(ctx))) { 9988 return false; 9989 } 9990 break; 9991 9992 case ObjCTypeParamVariance::Covariant: 9993 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9994 return false; 9995 break; 9996 9997 case ObjCTypeParamVariance::Contravariant: 9998 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9999 return false; 10000 break; 10001 } 10002 } 10003 10004 return true; 10005 } 10006 10007 QualType ASTContext::areCommonBaseCompatible( 10008 const ObjCObjectPointerType *Lptr, 10009 const ObjCObjectPointerType *Rptr) { 10010 const ObjCObjectType *LHS = Lptr->getObjectType(); 10011 const ObjCObjectType *RHS = Rptr->getObjectType(); 10012 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 10013 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 10014 10015 if (!LDecl || !RDecl) 10016 return {}; 10017 10018 // When either LHS or RHS is a kindof type, we should return a kindof type. 10019 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 10020 // kindof(A). 10021 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 10022 10023 // Follow the left-hand side up the class hierarchy until we either hit a 10024 // root or find the RHS. Record the ancestors in case we don't find it. 10025 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 10026 LHSAncestors; 10027 while (true) { 10028 // Record this ancestor. We'll need this if the common type isn't in the 10029 // path from the LHS to the root. 10030 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 10031 10032 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 10033 // Get the type arguments. 10034 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 10035 bool anyChanges = false; 10036 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10037 // Both have type arguments, compare them. 10038 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10039 LHS->getTypeArgs(), RHS->getTypeArgs(), 10040 /*stripKindOf=*/true)) 10041 return {}; 10042 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10043 // If only one has type arguments, the result will not have type 10044 // arguments. 10045 LHSTypeArgs = {}; 10046 anyChanges = true; 10047 } 10048 10049 // Compute the intersection of protocols. 10050 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10051 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 10052 Protocols); 10053 if (!Protocols.empty()) 10054 anyChanges = true; 10055 10056 // If anything in the LHS will have changed, build a new result type. 
10057 // If we need to return a kindof type but LHS is not a kindof type, we 10058 // build a new result type. 10059 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 10060 QualType Result = getObjCInterfaceType(LHS->getInterface()); 10061 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 10062 anyKindOf || LHS->isKindOfType()); 10063 return getObjCObjectPointerType(Result); 10064 } 10065 10066 return getObjCObjectPointerType(QualType(LHS, 0)); 10067 } 10068 10069 // Find the superclass. 10070 QualType LHSSuperType = LHS->getSuperClassType(); 10071 if (LHSSuperType.isNull()) 10072 break; 10073 10074 LHS = LHSSuperType->castAs<ObjCObjectType>(); 10075 } 10076 10077 // We didn't find anything by following the LHS to its root; now check 10078 // the RHS against the cached set of ancestors. 10079 while (true) { 10080 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 10081 if (KnownLHS != LHSAncestors.end()) { 10082 LHS = KnownLHS->second; 10083 10084 // Get the type arguments. 10085 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 10086 bool anyChanges = false; 10087 if (LHS->isSpecialized() && RHS->isSpecialized()) { 10088 // Both have type arguments, compare them. 10089 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 10090 LHS->getTypeArgs(), RHS->getTypeArgs(), 10091 /*stripKindOf=*/true)) 10092 return {}; 10093 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 10094 // If only one has type arguments, the result will not have type 10095 // arguments. 10096 RHSTypeArgs = {}; 10097 anyChanges = true; 10098 } 10099 10100 // Compute the intersection of protocols. 10101 SmallVector<ObjCProtocolDecl *, 8> Protocols; 10102 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 10103 Protocols); 10104 if (!Protocols.empty()) 10105 anyChanges = true; 10106 10107 // If we need to return a kindof type but RHS is not a kindof type, we 10108 // build a new result type. 10109 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 10110 QualType Result = getObjCInterfaceType(RHS->getInterface()); 10111 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 10112 anyKindOf || RHS->isKindOfType()); 10113 return getObjCObjectPointerType(Result); 10114 } 10115 10116 return getObjCObjectPointerType(QualType(RHS, 0)); 10117 } 10118 10119 // Find the superclass of the RHS. 10120 QualType RHSSuperType = RHS->getSuperClassType(); 10121 if (RHSSuperType.isNull()) 10122 break; 10123 10124 RHS = RHSSuperType->castAs<ObjCObjectType>(); 10125 } 10126 10127 return {}; 10128 } 10129 10130 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 10131 const ObjCObjectType *RHS) { 10132 assert(LHS->getInterface() && "LHS is not an interface type"); 10133 assert(RHS->getInterface() && "RHS is not an interface type"); 10134 10135 // Verify that the base decls are compatible: the RHS must be a subclass of 10136 // the LHS. 10137 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 10138 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 10139 if (!IsSuperClass) 10140 return false; 10141 10142 // If the LHS has protocol qualifiers, determine whether all of them are 10143 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 10144 // LHS). 10145 if (LHS->getNumProtocols() > 0) { 10146 // OK if conversion of LHS to SuperClass results in narrowing of types 10147 // ; i.e., SuperClass may implement at least one of the protocols 10148 // in LHS's protocol list. 
Example, SuperObj<P1> = lhs<P1,P2> is ok.
10149 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>.
10150 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
10151 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
10152 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's
10153 // qualifiers.
10154 for (auto *RHSPI : RHS->quals())
10155 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
10156 // If there are no protocols associated with RHS, it is not a match.
10157 if (SuperClassInheritedProtocols.empty())
10158 return false;
10159
10160 for (const auto *LHSProto : LHS->quals()) {
10161 bool SuperImplementsProtocol = false;
10162 for (auto *SuperClassProto : SuperClassInheritedProtocols)
10163 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
10164 SuperImplementsProtocol = true;
10165 break;
10166 }
10167 if (!SuperImplementsProtocol)
10168 return false;
10169 }
10170 }
10171
10172 // If the LHS is specialized, we may need to check type arguments.
10173 if (LHS->isSpecialized()) {
10174 // Follow the superclass chain until we've matched the LHS class in the
10175 // hierarchy. This substitutes type arguments through.
10176 const ObjCObjectType *RHSSuper = RHS;
10177 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
10178 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
10179
10180 // If the RHS is specialized, compare type arguments.
10181 if (RHSSuper->isSpecialized() &&
10182 !sameObjCTypeArgs(*this, LHS->getInterface(),
10183 LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
10184 /*stripKindOf=*/true)) {
10185 return false;
10186 }
10187 }
10188
10189 return true;
10190 }
10191
10192 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
10193 // Get the "pointed to" types.
10194 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
10195 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
10196
10197 if (!LHSOPT || !RHSOPT)
10198 return false;
10199
10200 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
10201 canAssignObjCInterfaces(RHSOPT, LHSOPT);
10202 }
10203
10204 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
10205 return canAssignObjCInterfaces(
10206 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
10207 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
10208 }
10209
10210 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
10211 /// both shall have the identically qualified version of a compatible type.
10212 /// C99 6.2.7p1: Two types have compatible types if their types are the
10213 /// same. See 6.7.[2,3,5] for additional rules.
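/// For example (illustrative only), these C declarations declare compatible
/// types, so the second is a valid redeclaration of the first:
/// \code
///   extern int a[];    // incomplete array type
///   extern int a[10];  // constant array type with a compatible element type
/// \endcode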
10214 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 10215 bool CompareUnqualified) { 10216 if (getLangOpts().CPlusPlus) 10217 return hasSameType(LHS, RHS); 10218 10219 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 10220 } 10221 10222 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 10223 return typesAreCompatible(LHS, RHS); 10224 } 10225 10226 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 10227 return !mergeTypes(LHS, RHS, true).isNull(); 10228 } 10229 10230 /// mergeTransparentUnionType - if T is a transparent union type and a member 10231 /// of T is compatible with SubType, return the merged type, else return 10232 /// QualType() 10233 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 10234 bool OfBlockPointer, 10235 bool Unqualified) { 10236 if (const RecordType *UT = T->getAsUnionType()) { 10237 RecordDecl *UD = UT->getDecl(); 10238 if (UD->hasAttr<TransparentUnionAttr>()) { 10239 for (const auto *I : UD->fields()) { 10240 QualType ET = I->getType().getUnqualifiedType(); 10241 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 10242 if (!MT.isNull()) 10243 return MT; 10244 } 10245 } 10246 } 10247 10248 return {}; 10249 } 10250 10251 /// mergeFunctionParameterTypes - merge two types which appear as function 10252 /// parameter types 10253 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 10254 bool OfBlockPointer, 10255 bool Unqualified) { 10256 // GNU extension: two types are compatible if they appear as a function 10257 // argument, one of the types is a transparent union type and the other 10258 // type is compatible with a union member 10259 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 10260 Unqualified); 10261 if (!lmerge.isNull()) 10262 return lmerge; 10263 10264 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 10265 Unqualified); 10266 if (!rmerge.isNull()) 10267 return rmerge; 10268 10269 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 10270 } 10271 10272 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 10273 bool OfBlockPointer, bool Unqualified, 10274 bool AllowCXX, 10275 bool IsConditionalOperator) { 10276 const auto *lbase = lhs->castAs<FunctionType>(); 10277 const auto *rbase = rhs->castAs<FunctionType>(); 10278 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 10279 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 10280 bool allLTypes = true; 10281 bool allRTypes = true; 10282 10283 // Check return type 10284 QualType retType; 10285 if (OfBlockPointer) { 10286 QualType RHS = rbase->getReturnType(); 10287 QualType LHS = lbase->getReturnType(); 10288 bool UnqualifiedResult = Unqualified; 10289 if (!UnqualifiedResult) 10290 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 10291 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 10292 } 10293 else 10294 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 10295 Unqualified); 10296 if (retType.isNull()) 10297 return {}; 10298 10299 if (Unqualified) 10300 retType = retType.getUnqualifiedType(); 10301 10302 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 10303 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 10304 if (Unqualified) { 10305 LRetType = LRetType.getUnqualifiedType(); 10306 RRetType = RRetType.getUnqualifiedType(); 10307 } 10308 10309 if (getCanonicalType(retType) != LRetType) 
10310 allLTypes = false; 10311 if (getCanonicalType(retType) != RRetType) 10312 allRTypes = false; 10313 10314 // FIXME: double check this 10315 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 10316 // rbase->getRegParmAttr() != 0 && 10317 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 10318 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 10319 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 10320 10321 // Compatible functions must have compatible calling conventions 10322 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 10323 return {}; 10324 10325 // Regparm is part of the calling convention. 10326 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 10327 return {}; 10328 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 10329 return {}; 10330 10331 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 10332 return {}; 10333 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 10334 return {}; 10335 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 10336 return {}; 10337 10338 // When merging declarations, it's common for supplemental information like 10339 // attributes to only be present in one of the declarations, and we generally 10340 // want type merging to preserve the union of information. So a merged 10341 // function type should be noreturn if it was noreturn in *either* operand 10342 // type. 10343 // 10344 // But for the conditional operator, this is backwards. The result of the 10345 // operator could be either operand, and its type should conservatively 10346 // reflect that. So a function type in a composite type is noreturn only 10347 // if it's noreturn in *both* operand types. 10348 // 10349 // Arguably, noreturn is a kind of subtype, and the conditional operator 10350 // ought to produce the most specific common supertype of its operand types. 10351 // That would differ from this rule in contravariant positions. However, 10352 // neither C nor C++ generally uses this kind of subtype reasoning. Also, 10353 // as a practical matter, it would only affect C code that does abstraction of 10354 // higher-order functions (taking noreturn callbacks!), which is uncommon to 10355 // say the least. So we use the simpler rule. 10356 bool NoReturn = IsConditionalOperator 10357 ? 
lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn() 10358 : lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 10359 if (lbaseInfo.getNoReturn() != NoReturn) 10360 allLTypes = false; 10361 if (rbaseInfo.getNoReturn() != NoReturn) 10362 allRTypes = false; 10363 10364 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 10365 10366 if (lproto && rproto) { // two C99 style function prototypes 10367 assert((AllowCXX || 10368 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 10369 "C++ shouldn't be here"); 10370 // Compatible functions must have the same number of parameters 10371 if (lproto->getNumParams() != rproto->getNumParams()) 10372 return {}; 10373 10374 // Variadic and non-variadic functions aren't compatible 10375 if (lproto->isVariadic() != rproto->isVariadic()) 10376 return {}; 10377 10378 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 10379 return {}; 10380 10381 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 10382 bool canUseLeft, canUseRight; 10383 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 10384 newParamInfos)) 10385 return {}; 10386 10387 if (!canUseLeft) 10388 allLTypes = false; 10389 if (!canUseRight) 10390 allRTypes = false; 10391 10392 // Check parameter type compatibility 10393 SmallVector<QualType, 10> types; 10394 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 10395 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 10396 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 10397 QualType paramType = mergeFunctionParameterTypes( 10398 lParamType, rParamType, OfBlockPointer, Unqualified); 10399 if (paramType.isNull()) 10400 return {}; 10401 10402 if (Unqualified) 10403 paramType = paramType.getUnqualifiedType(); 10404 10405 types.push_back(paramType); 10406 if (Unqualified) { 10407 lParamType = lParamType.getUnqualifiedType(); 10408 rParamType = rParamType.getUnqualifiedType(); 10409 } 10410 10411 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 10412 allLTypes = false; 10413 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 10414 allRTypes = false; 10415 } 10416 10417 if (allLTypes) return lhs; 10418 if (allRTypes) return rhs; 10419 10420 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 10421 EPI.ExtInfo = einfo; 10422 EPI.ExtParameterInfos = 10423 newParamInfos.empty() ? nullptr : newParamInfos.data(); 10424 return getFunctionType(retType, types, EPI); 10425 } 10426 10427 if (lproto) allRTypes = false; 10428 if (rproto) allLTypes = false; 10429 10430 const FunctionProtoType *proto = lproto ? lproto : rproto; 10431 if (proto) { 10432 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 10433 if (proto->isVariadic()) 10434 return {}; 10435 // Check that the types are compatible with the types that 10436 // would result from default argument promotions (C99 6.7.5.3p15). 10437 // The only types actually affected are promotable integer 10438 // types and floats, which would be passed as a different 10439 // type depending on whether the prototype is visible. 10440 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 10441 QualType paramTy = proto->getParamType(i); 10442 10443 // Look at the converted type of enum types, since that is the type used 10444 // to pass enum values. 
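// For example (illustrative only): an enum parameter whose underlying type is
// 'unsigned char' is checked here as 'unsigned char' and is therefore
// rejected below, since that type is changed by the default argument
// promotions.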
10445 if (const auto *Enum = paramTy->getAs<EnumType>()) { 10446 paramTy = Enum->getDecl()->getIntegerType(); 10447 if (paramTy.isNull()) 10448 return {}; 10449 } 10450 10451 if (isPromotableIntegerType(paramTy) || 10452 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 10453 return {}; 10454 } 10455 10456 if (allLTypes) return lhs; 10457 if (allRTypes) return rhs; 10458 10459 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 10460 EPI.ExtInfo = einfo; 10461 return getFunctionType(retType, proto->getParamTypes(), EPI); 10462 } 10463 10464 if (allLTypes) return lhs; 10465 if (allRTypes) return rhs; 10466 return getFunctionNoProtoType(retType, einfo); 10467 } 10468 10469 /// Given that we have an enum type and a non-enum type, try to merge them. 10470 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 10471 QualType other, bool isBlockReturnType) { 10472 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 10473 // a signed integer type, or an unsigned integer type. 10474 // Compatibility is based on the underlying type, not the promotion 10475 // type. 10476 QualType underlyingType = ET->getDecl()->getIntegerType(); 10477 if (underlyingType.isNull()) 10478 return {}; 10479 if (Context.hasSameType(underlyingType, other)) 10480 return other; 10481 10482 // In block return types, we're more permissive and accept any 10483 // integral type of the same size. 10484 if (isBlockReturnType && other->isIntegerType() && 10485 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 10486 return other; 10487 10488 return {}; 10489 } 10490 10491 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, bool OfBlockPointer, 10492 bool Unqualified, bool BlockReturnType, 10493 bool IsConditionalOperator) { 10494 // For C++ we will not reach this code with reference types (see below), 10495 // for OpenMP variant call overloading we might. 10496 // 10497 // C++ [expr]: If an expression initially has the type "reference to T", the 10498 // type is adjusted to "T" prior to any further analysis, the expression 10499 // designates the object or function denoted by the reference, and the 10500 // expression is an lvalue unless the reference is an rvalue reference and 10501 // the expression is a function call (possibly inside parentheses). 10502 auto *LHSRefTy = LHS->getAs<ReferenceType>(); 10503 auto *RHSRefTy = RHS->getAs<ReferenceType>(); 10504 if (LangOpts.OpenMP && LHSRefTy && RHSRefTy && 10505 LHS->getTypeClass() == RHS->getTypeClass()) 10506 return mergeTypes(LHSRefTy->getPointeeType(), RHSRefTy->getPointeeType(), 10507 OfBlockPointer, Unqualified, BlockReturnType); 10508 if (LHSRefTy || RHSRefTy) 10509 return {}; 10510 10511 if (Unqualified) { 10512 LHS = LHS.getUnqualifiedType(); 10513 RHS = RHS.getUnqualifiedType(); 10514 } 10515 10516 QualType LHSCan = getCanonicalType(LHS), 10517 RHSCan = getCanonicalType(RHS); 10518 10519 // If two types are identical, they are compatible. 10520 if (LHSCan == RHSCan) 10521 return LHS; 10522 10523 // If the qualifiers are different, the types aren't compatible... mostly. 10524 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10525 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10526 if (LQuals != RQuals) { 10527 // If any of these qualifiers are different, we have a type 10528 // mismatch. 
10529 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10530 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 10531 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 10532 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 10533 return {}; 10534 10535 // Exactly one GC qualifier difference is allowed: __strong is 10536 // okay if the other type has no GC qualifier but is an Objective 10537 // C object pointer (i.e. implicitly strong by default). We fix 10538 // this by pretending that the unqualified type was actually 10539 // qualified __strong. 10540 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10541 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10542 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10543 10544 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10545 return {}; 10546 10547 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 10548 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 10549 } 10550 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 10551 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 10552 } 10553 return {}; 10554 } 10555 10556 // Okay, qualifiers are equal. 10557 10558 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 10559 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 10560 10561 // We want to consider the two function types to be the same for these 10562 // comparisons, just force one to the other. 10563 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 10564 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 10565 10566 // Same as above for arrays 10567 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 10568 LHSClass = Type::ConstantArray; 10569 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 10570 RHSClass = Type::ConstantArray; 10571 10572 // ObjCInterfaces are just specialized ObjCObjects. 10573 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 10574 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 10575 10576 // Canonicalize ExtVector -> Vector. 10577 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 10578 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 10579 10580 // If the canonical type classes don't match. 10581 if (LHSClass != RHSClass) { 10582 // Note that we only have special rules for turning block enum 10583 // returns into block int returns, not vice-versa. 10584 if (const auto *ETy = LHS->getAs<EnumType>()) { 10585 return mergeEnumWithInteger(*this, ETy, RHS, false); 10586 } 10587 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 10588 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 10589 } 10590 // allow block pointer type to match an 'id' type. 10591 if (OfBlockPointer && !BlockReturnType) { 10592 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 10593 return LHS; 10594 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 10595 return RHS; 10596 } 10597 // Allow __auto_type to match anything; it merges to the type with more 10598 // information. 10599 if (const auto *AT = LHS->getAs<AutoType>()) { 10600 if (!AT->isDeduced() && AT->isGNUAutoType()) 10601 return RHS; 10602 } 10603 if (const auto *AT = RHS->getAs<AutoType>()) { 10604 if (!AT->isDeduced() && AT->isGNUAutoType()) 10605 return LHS; 10606 } 10607 return {}; 10608 } 10609 10610 // The canonical type classes match. 
10611 switch (LHSClass) { 10612 #define TYPE(Class, Base) 10613 #define ABSTRACT_TYPE(Class, Base) 10614 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 10615 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 10616 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 10617 #include "clang/AST/TypeNodes.inc" 10618 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 10619 10620 case Type::Auto: 10621 case Type::DeducedTemplateSpecialization: 10622 case Type::LValueReference: 10623 case Type::RValueReference: 10624 case Type::MemberPointer: 10625 llvm_unreachable("C++ should never be in mergeTypes"); 10626 10627 case Type::ObjCInterface: 10628 case Type::IncompleteArray: 10629 case Type::VariableArray: 10630 case Type::FunctionProto: 10631 case Type::ExtVector: 10632 llvm_unreachable("Types are eliminated above"); 10633 10634 case Type::Pointer: 10635 { 10636 // Merge two pointer types, while trying to preserve typedef info 10637 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 10638 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 10639 if (Unqualified) { 10640 LHSPointee = LHSPointee.getUnqualifiedType(); 10641 RHSPointee = RHSPointee.getUnqualifiedType(); 10642 } 10643 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 10644 Unqualified); 10645 if (ResultType.isNull()) 10646 return {}; 10647 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10648 return LHS; 10649 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10650 return RHS; 10651 return getPointerType(ResultType); 10652 } 10653 case Type::BlockPointer: 10654 { 10655 // Merge two block pointer types, while trying to preserve typedef info 10656 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 10657 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 10658 if (Unqualified) { 10659 LHSPointee = LHSPointee.getUnqualifiedType(); 10660 RHSPointee = RHSPointee.getUnqualifiedType(); 10661 } 10662 if (getLangOpts().OpenCL) { 10663 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 10664 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 10665 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 10666 // 6.12.5) thus the following check is asymmetric. 
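// For example (illustrative only): a '__generic' pointee on the LHS may
// absorb a '__private' pointee from the RHS, but not the other way around.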
10667 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 10668 return {}; 10669 LHSPteeQual.removeAddressSpace(); 10670 RHSPteeQual.removeAddressSpace(); 10671 LHSPointee = 10672 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 10673 RHSPointee = 10674 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 10675 } 10676 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 10677 Unqualified); 10678 if (ResultType.isNull()) 10679 return {}; 10680 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 10681 return LHS; 10682 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 10683 return RHS; 10684 return getBlockPointerType(ResultType); 10685 } 10686 case Type::Atomic: 10687 { 10688 // Merge two pointer types, while trying to preserve typedef info 10689 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 10690 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 10691 if (Unqualified) { 10692 LHSValue = LHSValue.getUnqualifiedType(); 10693 RHSValue = RHSValue.getUnqualifiedType(); 10694 } 10695 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10696 Unqualified); 10697 if (ResultType.isNull()) 10698 return {}; 10699 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10700 return LHS; 10701 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10702 return RHS; 10703 return getAtomicType(ResultType); 10704 } 10705 case Type::ConstantArray: 10706 { 10707 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10708 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10709 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10710 return {}; 10711 10712 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10713 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10714 if (Unqualified) { 10715 LHSElem = LHSElem.getUnqualifiedType(); 10716 RHSElem = RHSElem.getUnqualifiedType(); 10717 } 10718 10719 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10720 if (ResultType.isNull()) 10721 return {}; 10722 10723 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10724 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10725 10726 // If either side is a variable array, and both are complete, check whether 10727 // the current dimension is definite. 
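// For example (illustrative only): 'int[10]' can merge with a VLA 'int[n]'
// whose bound is not an integer constant expression, but it is rejected here
// against a VLA whose bound folds to a different constant such as 12.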
10728 if (LVAT || RVAT) { 10729 auto SizeFetch = [this](const VariableArrayType* VAT, 10730 const ConstantArrayType* CAT) 10731 -> std::pair<bool,llvm::APInt> { 10732 if (VAT) { 10733 std::optional<llvm::APSInt> TheInt; 10734 Expr *E = VAT->getSizeExpr(); 10735 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10736 return std::make_pair(true, *TheInt); 10737 return std::make_pair(false, llvm::APSInt()); 10738 } 10739 if (CAT) 10740 return std::make_pair(true, CAT->getSize()); 10741 return std::make_pair(false, llvm::APInt()); 10742 }; 10743 10744 bool HaveLSize, HaveRSize; 10745 llvm::APInt LSize, RSize; 10746 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10747 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10748 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10749 return {}; // Definite, but unequal, array dimension 10750 } 10751 10752 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10753 return LHS; 10754 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10755 return RHS; 10756 if (LCAT) 10757 return getConstantArrayType(ResultType, LCAT->getSize(), 10758 LCAT->getSizeExpr(), ArraySizeModifier(), 0); 10759 if (RCAT) 10760 return getConstantArrayType(ResultType, RCAT->getSize(), 10761 RCAT->getSizeExpr(), ArraySizeModifier(), 0); 10762 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10763 return LHS; 10764 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10765 return RHS; 10766 if (LVAT) { 10767 // FIXME: This isn't correct! But tricky to implement because 10768 // the array's size has to be the size of LHS, but the type 10769 // has to be different. 10770 return LHS; 10771 } 10772 if (RVAT) { 10773 // FIXME: This isn't correct! But tricky to implement because 10774 // the array's size has to be the size of RHS, but the type 10775 // has to be different. 10776 return RHS; 10777 } 10778 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10779 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10780 return getIncompleteArrayType(ResultType, ArraySizeModifier(), 0); 10781 } 10782 case Type::FunctionNoProto: 10783 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified, 10784 /*AllowCXX=*/false, IsConditionalOperator); 10785 case Type::Record: 10786 case Type::Enum: 10787 return {}; 10788 case Type::Builtin: 10789 // Only exactly equal builtin types are compatible, which is tested above. 10790 return {}; 10791 case Type::Complex: 10792 // Distinct complex types are incompatible. 10793 return {}; 10794 case Type::Vector: 10795 // FIXME: The merged type should be an ExtVector! 10796 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10797 RHSCan->castAs<VectorType>())) 10798 return LHS; 10799 return {}; 10800 case Type::ConstantMatrix: 10801 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10802 RHSCan->castAs<ConstantMatrixType>())) 10803 return LHS; 10804 return {}; 10805 case Type::ObjCObject: { 10806 // Check if the types are assignment compatible. 10807 // FIXME: This should be type compatibility, e.g. whether 10808 // "LHS x; RHS x;" at global scope is legal. 
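    // Note that the check is directional: it asks whether a value of the RHS
    // object type could be assigned to the LHS object type, and on success the
    // LHS spelling is the one that gets returned.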
10809 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10810 RHS->castAs<ObjCObjectType>())) 10811 return LHS; 10812 return {}; 10813 } 10814 case Type::ObjCObjectPointer: 10815 if (OfBlockPointer) { 10816 if (canAssignObjCInterfacesInBlockPointer( 10817 LHS->castAs<ObjCObjectPointerType>(), 10818 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10819 return LHS; 10820 return {}; 10821 } 10822 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10823 RHS->castAs<ObjCObjectPointerType>())) 10824 return LHS; 10825 return {}; 10826 case Type::Pipe: 10827 assert(LHS != RHS && 10828 "Equivalent pipe types should have already been handled!"); 10829 return {}; 10830 case Type::BitInt: { 10831 // Merge two bit-precise int types, while trying to preserve typedef info. 10832 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10833 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10834 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10835 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10836 10837 // Like unsigned/int, shouldn't have a type if they don't match. 10838 if (LHSUnsigned != RHSUnsigned) 10839 return {}; 10840 10841 if (LHSBits != RHSBits) 10842 return {}; 10843 return LHS; 10844 } 10845 } 10846 10847 llvm_unreachable("Invalid Type::Class!"); 10848 } 10849 10850 bool ASTContext::mergeExtParameterInfo( 10851 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10852 bool &CanUseFirst, bool &CanUseSecond, 10853 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10854 assert(NewParamInfos.empty() && "param info list not empty"); 10855 CanUseFirst = CanUseSecond = true; 10856 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10857 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10858 10859 // Fast path: if the first type doesn't have ext parameter infos, 10860 // we match if and only if the second type also doesn't have them. 10861 if (!FirstHasInfo && !SecondHasInfo) 10862 return true; 10863 10864 bool NeedParamInfo = false; 10865 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10866 : SecondFnType->getExtParameterInfos().size(); 10867 10868 for (size_t I = 0; I < E; ++I) { 10869 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10870 if (FirstHasInfo) 10871 FirstParam = FirstFnType->getExtParameterInfo(I); 10872 if (SecondHasInfo) 10873 SecondParam = SecondFnType->getExtParameterInfo(I); 10874 10875 // Cannot merge unless everything except the noescape flag matches. 10876 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10877 return false; 10878 10879 bool FirstNoEscape = FirstParam.isNoEscape(); 10880 bool SecondNoEscape = SecondParam.isNoEscape(); 10881 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10882 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10883 if (NewParamInfos.back().getOpaqueValue()) 10884 NeedParamInfo = true; 10885 if (FirstNoEscape != IsNoEscape) 10886 CanUseFirst = false; 10887 if (SecondNoEscape != IsNoEscape) 10888 CanUseSecond = false; 10889 } 10890 10891 if (!NeedParamInfo) 10892 NewParamInfos.clear(); 10893 10894 return true; 10895 } 10896 10897 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10898 ObjCLayouts[CD] = nullptr; 10899 } 10900 10901 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10902 /// 'RHS' attributes and returns the merged version; including for function 10903 /// return types. 
10904 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10905 QualType LHSCan = getCanonicalType(LHS), 10906 RHSCan = getCanonicalType(RHS); 10907 // If two types are identical, they are compatible. 10908 if (LHSCan == RHSCan) 10909 return LHS; 10910 if (RHSCan->isFunctionType()) { 10911 if (!LHSCan->isFunctionType()) 10912 return {}; 10913 QualType OldReturnType = 10914 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10915 QualType NewReturnType = 10916 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10917 QualType ResReturnType = 10918 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10919 if (ResReturnType.isNull()) 10920 return {}; 10921 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10922 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10923 // In either case, use OldReturnType to build the new function type. 10924 const auto *F = LHS->castAs<FunctionType>(); 10925 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10926 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10927 EPI.ExtInfo = getFunctionExtInfo(LHS); 10928 QualType ResultType = 10929 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10930 return ResultType; 10931 } 10932 } 10933 return {}; 10934 } 10935 10936 // If the qualifiers are different, the types can still be merged. 10937 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10938 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10939 if (LQuals != RQuals) { 10940 // If any of these qualifiers are different, we have a type mismatch. 10941 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10942 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10943 return {}; 10944 10945 // Exactly one GC qualifier difference is allowed: __strong is 10946 // okay if the other type has no GC qualifier but is an Objective 10947 // C object pointer (i.e. implicitly strong by default). We fix 10948 // this by pretending that the unqualified type was actually 10949 // qualified __strong. 
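  // For example, under Objective-C GC an unqualified 'id x;' merges with
  // '__strong id x;' because the unqualified pointer is implicitly __strong,
  // whereas any __weak mismatch is rejected outright below.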
10950 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10951 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10952 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10953 10954 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10955 return {}; 10956 10957 if (GC_L == Qualifiers::Strong) 10958 return LHS; 10959 if (GC_R == Qualifiers::Strong) 10960 return RHS; 10961 return {}; 10962 } 10963 10964 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10965 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10966 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10967 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10968 if (ResQT == LHSBaseQT) 10969 return LHS; 10970 if (ResQT == RHSBaseQT) 10971 return RHS; 10972 } 10973 return {}; 10974 } 10975 10976 //===----------------------------------------------------------------------===// 10977 // Integer Predicates 10978 //===----------------------------------------------------------------------===// 10979 10980 unsigned ASTContext::getIntWidth(QualType T) const { 10981 if (const auto *ET = T->getAs<EnumType>()) 10982 T = ET->getDecl()->getIntegerType(); 10983 if (T->isBooleanType()) 10984 return 1; 10985 if (const auto *EIT = T->getAs<BitIntType>()) 10986 return EIT->getNumBits(); 10987 // For builtin types, just use the standard type sizing method 10988 return (unsigned)getTypeSize(T); 10989 } 10990 10991 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 10992 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 10993 T->isFixedPointType()) && 10994 "Unexpected type"); 10995 10996 // Turn <4 x signed int> -> <4 x unsigned int> 10997 if (const auto *VTy = T->getAs<VectorType>()) 10998 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 10999 VTy->getNumElements(), VTy->getVectorKind()); 11000 11001 // For _BitInt, return an unsigned _BitInt with same width. 11002 if (const auto *EITy = T->getAs<BitIntType>()) 11003 return getBitIntType(/*Unsigned=*/true, EITy->getNumBits()); 11004 11005 // For enums, get the underlying integer type of the enum, and let the general 11006 // integer type signchanging code handle it. 11007 if (const auto *ETy = T->getAs<EnumType>()) 11008 T = ETy->getDecl()->getIntegerType(); 11009 11010 switch (T->castAs<BuiltinType>()->getKind()) { 11011 case BuiltinType::Char_U: 11012 // Plain `char` is mapped to `unsigned char` even if it's already unsigned 11013 case BuiltinType::Char_S: 11014 case BuiltinType::SChar: 11015 case BuiltinType::Char8: 11016 return UnsignedCharTy; 11017 case BuiltinType::Short: 11018 return UnsignedShortTy; 11019 case BuiltinType::Int: 11020 return UnsignedIntTy; 11021 case BuiltinType::Long: 11022 return UnsignedLongTy; 11023 case BuiltinType::LongLong: 11024 return UnsignedLongLongTy; 11025 case BuiltinType::Int128: 11026 return UnsignedInt128Ty; 11027 // wchar_t is special. It is either signed or not, but when it's signed, 11028 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 11029 // version of its underlying type instead. 
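  // For example, on a typical Unix target where wchar_t's underlying type is
  // 'int', the result here is simply 'unsigned int'.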
11030 case BuiltinType::WChar_S: 11031 return getUnsignedWCharType(); 11032 11033 case BuiltinType::ShortAccum: 11034 return UnsignedShortAccumTy; 11035 case BuiltinType::Accum: 11036 return UnsignedAccumTy; 11037 case BuiltinType::LongAccum: 11038 return UnsignedLongAccumTy; 11039 case BuiltinType::SatShortAccum: 11040 return SatUnsignedShortAccumTy; 11041 case BuiltinType::SatAccum: 11042 return SatUnsignedAccumTy; 11043 case BuiltinType::SatLongAccum: 11044 return SatUnsignedLongAccumTy; 11045 case BuiltinType::ShortFract: 11046 return UnsignedShortFractTy; 11047 case BuiltinType::Fract: 11048 return UnsignedFractTy; 11049 case BuiltinType::LongFract: 11050 return UnsignedLongFractTy; 11051 case BuiltinType::SatShortFract: 11052 return SatUnsignedShortFractTy; 11053 case BuiltinType::SatFract: 11054 return SatUnsignedFractTy; 11055 case BuiltinType::SatLongFract: 11056 return SatUnsignedLongFractTy; 11057 default: 11058 assert((T->hasUnsignedIntegerRepresentation() || 11059 T->isUnsignedFixedPointType()) && 11060 "Unexpected signed integer or fixed point type"); 11061 return T; 11062 } 11063 } 11064 11065 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 11066 assert((T->hasIntegerRepresentation() || T->isEnumeralType() || 11067 T->isFixedPointType()) && 11068 "Unexpected type"); 11069 11070 // Turn <4 x unsigned int> -> <4 x signed int> 11071 if (const auto *VTy = T->getAs<VectorType>()) 11072 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 11073 VTy->getNumElements(), VTy->getVectorKind()); 11074 11075 // For _BitInt, return a signed _BitInt with same width. 11076 if (const auto *EITy = T->getAs<BitIntType>()) 11077 return getBitIntType(/*Unsigned=*/false, EITy->getNumBits()); 11078 11079 // For enums, get the underlying integer type of the enum, and let the general 11080 // integer type signchanging code handle it. 11081 if (const auto *ETy = T->getAs<EnumType>()) 11082 T = ETy->getDecl()->getIntegerType(); 11083 11084 switch (T->castAs<BuiltinType>()->getKind()) { 11085 case BuiltinType::Char_S: 11086 // Plain `char` is mapped to `signed char` even if it's already signed 11087 case BuiltinType::Char_U: 11088 case BuiltinType::UChar: 11089 case BuiltinType::Char8: 11090 return SignedCharTy; 11091 case BuiltinType::UShort: 11092 return ShortTy; 11093 case BuiltinType::UInt: 11094 return IntTy; 11095 case BuiltinType::ULong: 11096 return LongTy; 11097 case BuiltinType::ULongLong: 11098 return LongLongTy; 11099 case BuiltinType::UInt128: 11100 return Int128Ty; 11101 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 11102 // there's no matching "signed wchar_t". Therefore we return the signed 11103 // version of its underlying type instead. 
11104 case BuiltinType::WChar_U: 11105 return getSignedWCharType(); 11106 11107 case BuiltinType::UShortAccum: 11108 return ShortAccumTy; 11109 case BuiltinType::UAccum: 11110 return AccumTy; 11111 case BuiltinType::ULongAccum: 11112 return LongAccumTy; 11113 case BuiltinType::SatUShortAccum: 11114 return SatShortAccumTy; 11115 case BuiltinType::SatUAccum: 11116 return SatAccumTy; 11117 case BuiltinType::SatULongAccum: 11118 return SatLongAccumTy; 11119 case BuiltinType::UShortFract: 11120 return ShortFractTy; 11121 case BuiltinType::UFract: 11122 return FractTy; 11123 case BuiltinType::ULongFract: 11124 return LongFractTy; 11125 case BuiltinType::SatUShortFract: 11126 return SatShortFractTy; 11127 case BuiltinType::SatUFract: 11128 return SatFractTy; 11129 case BuiltinType::SatULongFract: 11130 return SatLongFractTy; 11131 default: 11132 assert( 11133 (T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 11134 "Unexpected signed integer or fixed point type"); 11135 return T; 11136 } 11137 } 11138 11139 ASTMutationListener::~ASTMutationListener() = default; 11140 11141 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 11142 QualType ReturnType) {} 11143 11144 //===----------------------------------------------------------------------===// 11145 // Builtin Type Computation 11146 //===----------------------------------------------------------------------===// 11147 11148 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 11149 /// pointer over the consumed characters. This returns the resultant type. If 11150 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 11151 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 11152 /// a vector of "i*". 11153 /// 11154 /// RequiresICE is filled in on return to indicate whether the value is required 11155 /// to be an Integer Constant Expression. 11156 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 11157 ASTContext::GetBuiltinTypeError &Error, 11158 bool &RequiresICE, 11159 bool AllowTypeModifiers) { 11160 // Modifiers. 11161 int HowLong = 0; 11162 bool Signed = false, Unsigned = false; 11163 RequiresICE = false; 11164 11165 // Read the prefixed modifiers first. 11166 bool Done = false; 11167 #ifndef NDEBUG 11168 bool IsSpecial = false; 11169 #endif 11170 while (!Done) { 11171 switch (*Str++) { 11172 default: Done = true; --Str; break; 11173 case 'I': 11174 RequiresICE = true; 11175 break; 11176 case 'S': 11177 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 11178 assert(!Signed && "Can't use 'S' modifier multiple times!"); 11179 Signed = true; 11180 break; 11181 case 'U': 11182 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 11183 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 11184 Unsigned = true; 11185 break; 11186 case 'L': 11187 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 11188 assert(HowLong <= 2 && "Can't have LLLL modifier"); 11189 ++HowLong; 11190 break; 11191 case 'N': 11192 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 11193 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11194 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 11195 #ifndef NDEBUG 11196 IsSpecial = true; 11197 #endif 11198 if (Context.getTargetInfo().getLongWidth() == 32) 11199 ++HowLong; 11200 break; 11201 case 'W': 11202 // This modifier represents int64 type. 
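    // For example, on an LP64 target where int64_t is 'long' this behaves like
    // a single 'L'; on an LLP64 target where int64_t is 'long long' it behaves
    // like 'LL'.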
11203 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11204 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 11205 #ifndef NDEBUG 11206 IsSpecial = true; 11207 #endif 11208 switch (Context.getTargetInfo().getInt64Type()) { 11209 default: 11210 llvm_unreachable("Unexpected integer type"); 11211 case TargetInfo::SignedLong: 11212 HowLong = 1; 11213 break; 11214 case TargetInfo::SignedLongLong: 11215 HowLong = 2; 11216 break; 11217 } 11218 break; 11219 case 'Z': 11220 // This modifier represents int32 type. 11221 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11222 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 11223 #ifndef NDEBUG 11224 IsSpecial = true; 11225 #endif 11226 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 11227 default: 11228 llvm_unreachable("Unexpected integer type"); 11229 case TargetInfo::SignedInt: 11230 HowLong = 0; 11231 break; 11232 case TargetInfo::SignedLong: 11233 HowLong = 1; 11234 break; 11235 case TargetInfo::SignedLongLong: 11236 HowLong = 2; 11237 break; 11238 } 11239 break; 11240 case 'O': 11241 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 11242 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 11243 #ifndef NDEBUG 11244 IsSpecial = true; 11245 #endif 11246 if (Context.getLangOpts().OpenCL) 11247 HowLong = 1; 11248 else 11249 HowLong = 2; 11250 break; 11251 } 11252 } 11253 11254 QualType Type; 11255 11256 // Read the base type. 11257 switch (*Str++) { 11258 default: llvm_unreachable("Unknown builtin type letter!"); 11259 case 'x': 11260 assert(HowLong == 0 && !Signed && !Unsigned && 11261 "Bad modifiers used with 'x'!"); 11262 Type = Context.Float16Ty; 11263 break; 11264 case 'y': 11265 assert(HowLong == 0 && !Signed && !Unsigned && 11266 "Bad modifiers used with 'y'!"); 11267 Type = Context.BFloat16Ty; 11268 break; 11269 case 'v': 11270 assert(HowLong == 0 && !Signed && !Unsigned && 11271 "Bad modifiers used with 'v'!"); 11272 Type = Context.VoidTy; 11273 break; 11274 case 'h': 11275 assert(HowLong == 0 && !Signed && !Unsigned && 11276 "Bad modifiers used with 'h'!"); 11277 Type = Context.HalfTy; 11278 break; 11279 case 'f': 11280 assert(HowLong == 0 && !Signed && !Unsigned && 11281 "Bad modifiers used with 'f'!"); 11282 Type = Context.FloatTy; 11283 break; 11284 case 'd': 11285 assert(HowLong < 3 && !Signed && !Unsigned && 11286 "Bad modifiers used with 'd'!"); 11287 if (HowLong == 1) 11288 Type = Context.LongDoubleTy; 11289 else if (HowLong == 2) 11290 Type = Context.Float128Ty; 11291 else 11292 Type = Context.DoubleTy; 11293 break; 11294 case 's': 11295 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 11296 if (Unsigned) 11297 Type = Context.UnsignedShortTy; 11298 else 11299 Type = Context.ShortTy; 11300 break; 11301 case 'i': 11302 if (HowLong == 3) 11303 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 11304 else if (HowLong == 2) 11305 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 11306 else if (HowLong == 1) 11307 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 11308 else 11309 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 11310 break; 11311 case 'c': 11312 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 11313 if (Signed) 11314 Type = Context.SignedCharTy; 11315 else if (Unsigned) 11316 Type = Context.UnsignedCharTy; 11317 else 11318 Type = Context.CharTy; 11319 break; 11320 case 'b': // boolean 11321 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 11322 Type = Context.BoolTy; 11323 break; 11324 case 'z': // size_t. 11325 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 11326 Type = Context.getSizeType(); 11327 break; 11328 case 'w': // wchar_t. 11329 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 11330 Type = Context.getWideCharType(); 11331 break; 11332 case 'F': 11333 Type = Context.getCFConstantStringType(); 11334 break; 11335 case 'G': 11336 Type = Context.getObjCIdType(); 11337 break; 11338 case 'H': 11339 Type = Context.getObjCSelType(); 11340 break; 11341 case 'M': 11342 Type = Context.getObjCSuperType(); 11343 break; 11344 case 'a': 11345 Type = Context.getBuiltinVaListType(); 11346 assert(!Type.isNull() && "builtin va list type not initialized!"); 11347 break; 11348 case 'A': 11349 // This is a "reference" to a va_list; however, what exactly 11350 // this means depends on how va_list is defined. There are two 11351 // different kinds of va_list: ones passed by value, and ones 11352 // passed by reference. An example of a by-value va_list is 11353 // x86, where va_list is a char*. An example of by-ref va_list 11354 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 11355 // we want this argument to be a char*&; for x86-64, we want 11356 // it to be a __va_list_tag*. 11357 Type = Context.getBuiltinVaListType(); 11358 assert(!Type.isNull() && "builtin va list type not initialized!"); 11359 if (Type->isArrayType()) 11360 Type = Context.getArrayDecayedType(Type); 11361 else 11362 Type = Context.getLValueReferenceType(Type); 11363 break; 11364 case 'q': { 11365 char *End; 11366 unsigned NumElements = strtoul(Str, &End, 10); 11367 assert(End != Str && "Missing vector size"); 11368 Str = End; 11369 11370 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11371 RequiresICE, false); 11372 assert(!RequiresICE && "Can't require vector ICE"); 11373 11374 Type = Context.getScalableVectorType(ElementType, NumElements); 11375 break; 11376 } 11377 case 'Q': { 11378 switch (*Str++) { 11379 case 'a': { 11380 Type = Context.SveCountTy; 11381 break; 11382 } 11383 default: 11384 llvm_unreachable("Unexpected target builtin type"); 11385 } 11386 break; 11387 } 11388 case 'V': { 11389 char *End; 11390 unsigned NumElements = strtoul(Str, &End, 10); 11391 assert(End != Str && "Missing vector size"); 11392 Str = End; 11393 11394 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 11395 RequiresICE, false); 11396 assert(!RequiresICE && "Can't require vector ICE"); 11397 11398 // TODO: No way to make AltiVec vectors in builtins yet. 
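    // For example, a descriptor such as "V4f" decodes to a generic vector of
    // four floats: the element count was parsed just above and the element
    // type right after it.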
11399 Type = Context.getVectorType(ElementType, NumElements, VectorKind::Generic); 11400 break; 11401 } 11402 case 'E': { 11403 char *End; 11404 11405 unsigned NumElements = strtoul(Str, &End, 10); 11406 assert(End != Str && "Missing vector size"); 11407 11408 Str = End; 11409 11410 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11411 false); 11412 Type = Context.getExtVectorType(ElementType, NumElements); 11413 break; 11414 } 11415 case 'X': { 11416 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 11417 false); 11418 assert(!RequiresICE && "Can't require complex ICE"); 11419 Type = Context.getComplexType(ElementType); 11420 break; 11421 } 11422 case 'Y': 11423 Type = Context.getPointerDiffType(); 11424 break; 11425 case 'P': 11426 Type = Context.getFILEType(); 11427 if (Type.isNull()) { 11428 Error = ASTContext::GE_Missing_stdio; 11429 return {}; 11430 } 11431 break; 11432 case 'J': 11433 if (Signed) 11434 Type = Context.getsigjmp_bufType(); 11435 else 11436 Type = Context.getjmp_bufType(); 11437 11438 if (Type.isNull()) { 11439 Error = ASTContext::GE_Missing_setjmp; 11440 return {}; 11441 } 11442 break; 11443 case 'K': 11444 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 11445 Type = Context.getucontext_tType(); 11446 11447 if (Type.isNull()) { 11448 Error = ASTContext::GE_Missing_ucontext; 11449 return {}; 11450 } 11451 break; 11452 case 'p': 11453 Type = Context.getProcessIDType(); 11454 break; 11455 } 11456 11457 // If there are modifiers and if we're allowed to parse them, go for it. 11458 Done = !AllowTypeModifiers; 11459 while (!Done) { 11460 switch (char c = *Str++) { 11461 default: Done = true; --Str; break; 11462 case '*': 11463 case '&': { 11464 // Both pointers and references can have their pointee types 11465 // qualified with an address space. 11466 char *End; 11467 unsigned AddrSpace = strtoul(Str, &End, 10); 11468 if (End != Str) { 11469 // Note AddrSpace == 0 is not the same as an unspecified address space. 11470 Type = Context.getAddrSpaceQualType( 11471 Type, 11472 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 11473 Str = End; 11474 } 11475 if (c == '*') 11476 Type = Context.getPointerType(Type); 11477 else 11478 Type = Context.getLValueReferenceType(Type); 11479 break; 11480 } 11481 // FIXME: There's no way to have a built-in with an rvalue ref arg. 11482 case 'C': 11483 Type = Type.withConst(); 11484 break; 11485 case 'D': 11486 Type = Context.getVolatileType(Type); 11487 break; 11488 case 'R': 11489 Type = Type.withRestrict(); 11490 break; 11491 } 11492 } 11493 11494 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 11495 "Integer constant 'I' type must be an integer"); 11496 11497 return Type; 11498 } 11499 11500 // On some targets such as PowerPC, some of the builtins are defined with custom 11501 // type descriptors for target-dependent types. These descriptors are decoded in 11502 // other functions, but it may be useful to be able to fall back to default 11503 // descriptor decoding to define builtins mixing target-dependent and target- 11504 // independent types. This function allows decoding one type descriptor with 11505 // default decoding. 
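// For example, a target-specific decoder that recognizes its own descriptor
// letters can forward the remaining ordinary ones (such as 'i' or 'v') to this
// wrapper rather than re-implementing the default parsing.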
11506 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 11507 GetBuiltinTypeError &Error, bool &RequireICE, 11508 bool AllowTypeModifiers) const { 11509 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 11510 } 11511 11512 /// GetBuiltinType - Return the type for the specified builtin. 11513 QualType ASTContext::GetBuiltinType(unsigned Id, 11514 GetBuiltinTypeError &Error, 11515 unsigned *IntegerConstantArgs) const { 11516 const char *TypeStr = BuiltinInfo.getTypeString(Id); 11517 if (TypeStr[0] == '\0') { 11518 Error = GE_Missing_type; 11519 return {}; 11520 } 11521 11522 SmallVector<QualType, 8> ArgTypes; 11523 11524 bool RequiresICE = false; 11525 Error = GE_None; 11526 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 11527 RequiresICE, true); 11528 if (Error != GE_None) 11529 return {}; 11530 11531 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 11532 11533 while (TypeStr[0] && TypeStr[0] != '.') { 11534 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 11535 if (Error != GE_None) 11536 return {}; 11537 11538 // If this argument is required to be an IntegerConstantExpression and the 11539 // caller cares, fill in the bitmask we return. 11540 if (RequiresICE && IntegerConstantArgs) 11541 *IntegerConstantArgs |= 1 << ArgTypes.size(); 11542 11543 // Do array -> pointer decay. The builtin should use the decayed type. 11544 if (Ty->isArrayType()) 11545 Ty = getArrayDecayedType(Ty); 11546 11547 ArgTypes.push_back(Ty); 11548 } 11549 11550 if (Id == Builtin::BI__GetExceptionInfo) 11551 return {}; 11552 11553 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 11554 "'.' should only occur at end of builtin type list!"); 11555 11556 bool Variadic = (TypeStr[0] == '.'); 11557 11558 FunctionType::ExtInfo EI(getDefaultCallingConvention( 11559 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 11560 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 11561 11562 11563 // We really shouldn't be making a no-proto type here. 11564 if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes()) 11565 return getFunctionNoProtoType(ResType, EI); 11566 11567 FunctionProtoType::ExtProtoInfo EPI; 11568 EPI.ExtInfo = EI; 11569 EPI.Variadic = Variadic; 11570 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 11571 EPI.ExceptionSpec.Type = 11572 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 11573 11574 return getFunctionType(ResType, ArgTypes, EPI); 11575 } 11576 11577 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 11578 const FunctionDecl *FD) { 11579 if (!FD->isExternallyVisible()) 11580 return GVA_Internal; 11581 11582 // Non-user-provided functions get emitted as weak definitions with every 11583 // use, no matter whether they've been explicitly instantiated etc. 
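  // For example, an implicitly-defined copy constructor or a function
  // explicitly defaulted on its first declaration is emitted with discardable
  // (linkonce_odr-style) linkage wherever it is used.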
11584 if (!FD->isUserProvided()) 11585 return GVA_DiscardableODR; 11586 11587 GVALinkage External; 11588 switch (FD->getTemplateSpecializationKind()) { 11589 case TSK_Undeclared: 11590 case TSK_ExplicitSpecialization: 11591 External = GVA_StrongExternal; 11592 break; 11593 11594 case TSK_ExplicitInstantiationDefinition: 11595 return GVA_StrongODR; 11596 11597 // C++11 [temp.explicit]p10: 11598 // [ Note: The intent is that an inline function that is the subject of 11599 // an explicit instantiation declaration will still be implicitly 11600 // instantiated when used so that the body can be considered for 11601 // inlining, but that no out-of-line copy of the inline function would be 11602 // generated in the translation unit. -- end note ] 11603 case TSK_ExplicitInstantiationDeclaration: 11604 return GVA_AvailableExternally; 11605 11606 case TSK_ImplicitInstantiation: 11607 External = GVA_DiscardableODR; 11608 break; 11609 } 11610 11611 if (!FD->isInlined()) 11612 return External; 11613 11614 if ((!Context.getLangOpts().CPlusPlus && 11615 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 11616 !FD->hasAttr<DLLExportAttr>()) || 11617 FD->hasAttr<GNUInlineAttr>()) { 11618 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 11619 11620 // GNU or C99 inline semantics. Determine whether this symbol should be 11621 // externally visible. 11622 if (FD->isInlineDefinitionExternallyVisible()) 11623 return External; 11624 11625 // C99 inline semantics, where the symbol is not externally visible. 11626 return GVA_AvailableExternally; 11627 } 11628 11629 // Functions specified with extern and inline in -fms-compatibility mode 11630 // forcibly get emitted. While the body of the function cannot be later 11631 // replaced, the function definition cannot be discarded. 11632 if (FD->isMSExternInline()) 11633 return GVA_StrongODR; 11634 11635 if (Context.getTargetInfo().getCXXABI().isMicrosoft() && 11636 isa<CXXConstructorDecl>(FD) && 11637 cast<CXXConstructorDecl>(FD)->isInheritingConstructor()) 11638 // Our approach to inheriting constructors is fundamentally different from 11639 // that used by the MS ABI, so keep our inheriting constructor thunks 11640 // internal rather than trying to pick an unambiguous mangling for them. 11641 return GVA_Internal; 11642 11643 return GVA_DiscardableODR; 11644 } 11645 11646 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 11647 const Decl *D, GVALinkage L) { 11648 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 11649 // dllexport/dllimport on inline functions. 11650 if (D->hasAttr<DLLImportAttr>()) { 11651 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 11652 return GVA_AvailableExternally; 11653 } else if (D->hasAttr<DLLExportAttr>()) { 11654 if (L == GVA_DiscardableODR) 11655 return GVA_StrongODR; 11656 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 11657 // Device-side functions with __global__ attribute must always be 11658 // visible externally so they can be launched from host. 11659 if (D->hasAttr<CUDAGlobalAttr>() && 11660 (L == GVA_DiscardableODR || L == GVA_Internal)) 11661 return GVA_StrongODR; 11662 // Single source offloading languages like CUDA/HIP need to be able to 11663 // access static device variables from host code of the same compilation 11664 // unit. 
This is done by externalizing the static variable with a shared 11665 // name between the host and device compilation which is the same for the 11666 // same compilation unit whereas different among different compilation 11667 // units. 11668 if (Context.shouldExternalize(D)) 11669 return GVA_StrongExternal; 11670 } 11671 return L; 11672 } 11673 11674 /// Adjust the GVALinkage for a declaration based on what an external AST source 11675 /// knows about whether there can be other definitions of this declaration. 11676 static GVALinkage 11677 adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D, 11678 GVALinkage L) { 11679 ExternalASTSource *Source = Ctx.getExternalSource(); 11680 if (!Source) 11681 return L; 11682 11683 switch (Source->hasExternalDefinitions(D)) { 11684 case ExternalASTSource::EK_Never: 11685 // Other translation units rely on us to provide the definition. 11686 if (L == GVA_DiscardableODR) 11687 return GVA_StrongODR; 11688 break; 11689 11690 case ExternalASTSource::EK_Always: 11691 return GVA_AvailableExternally; 11692 11693 case ExternalASTSource::EK_ReplyHazy: 11694 break; 11695 } 11696 return L; 11697 } 11698 11699 GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const { 11700 return adjustGVALinkageForExternalDefinitionKind(*this, FD, 11701 adjustGVALinkageForAttributes(*this, FD, 11702 basicGVALinkageForFunction(*this, FD))); 11703 } 11704 11705 static GVALinkage basicGVALinkageForVariable(const ASTContext &Context, 11706 const VarDecl *VD) { 11707 // As an extension for interactive REPLs, make sure constant variables are 11708 // only emitted once instead of LinkageComputer::getLVForNamespaceScopeDecl 11709 // marking them as internal. 11710 if (Context.getLangOpts().CPlusPlus && 11711 Context.getLangOpts().IncrementalExtensions && 11712 VD->getType().isConstQualified() && 11713 !VD->getType().isVolatileQualified() && !VD->isInline() && 11714 !isa<VarTemplateSpecializationDecl>(VD) && !VD->getDescribedVarTemplate()) 11715 return GVA_DiscardableODR; 11716 11717 if (!VD->isExternallyVisible()) 11718 return GVA_Internal; 11719 11720 if (VD->isStaticLocal()) { 11721 const DeclContext *LexicalContext = VD->getParentFunctionOrMethod(); 11722 while (LexicalContext && !isa<FunctionDecl>(LexicalContext)) 11723 LexicalContext = LexicalContext->getLexicalParent(); 11724 11725 // ObjC Blocks can create local variables that don't have a FunctionDecl 11726 // LexicalContext. 11727 if (!LexicalContext) 11728 return GVA_DiscardableODR; 11729 11730 // Otherwise, let the static local variable inherit its linkage from the 11731 // nearest enclosing function. 11732 auto StaticLocalLinkage = 11733 Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext)); 11734 11735 // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must 11736 // be emitted in any object with references to the symbol for the object it 11737 // contains, whether inline or out-of-line." 11738 // Similar behavior is observed with MSVC. An alternative ABI could use 11739 // StrongODR/AvailableExternally to match the function, but none are 11740 // known/supported currently. 11741 if (StaticLocalLinkage == GVA_StrongODR || 11742 StaticLocalLinkage == GVA_AvailableExternally) 11743 return GVA_DiscardableODR; 11744 return StaticLocalLinkage; 11745 } 11746 11747 // MSVC treats in-class initialized static data members as definitions. 11748 // By giving them non-strong linkage, out-of-line definitions won't 11749 // cause link errors. 
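  // For example, given 'struct S { static const int N = 4; };' the MS ABI
  // treats the in-class initializer of N as a definition, so an out-of-line
  // 'const int S::N;' elsewhere must not clash with it at link time.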
11750 if (Context.isMSStaticDataMemberInlineDefinition(VD)) 11751 return GVA_DiscardableODR; 11752 11753 // Most non-template variables have strong linkage; inline variables are 11754 // linkonce_odr or (occasionally, for compatibility) weak_odr. 11755 GVALinkage StrongLinkage; 11756 switch (Context.getInlineVariableDefinitionKind(VD)) { 11757 case ASTContext::InlineVariableDefinitionKind::None: 11758 StrongLinkage = GVA_StrongExternal; 11759 break; 11760 case ASTContext::InlineVariableDefinitionKind::Weak: 11761 case ASTContext::InlineVariableDefinitionKind::WeakUnknown: 11762 StrongLinkage = GVA_DiscardableODR; 11763 break; 11764 case ASTContext::InlineVariableDefinitionKind::Strong: 11765 StrongLinkage = GVA_StrongODR; 11766 break; 11767 } 11768 11769 switch (VD->getTemplateSpecializationKind()) { 11770 case TSK_Undeclared: 11771 return StrongLinkage; 11772 11773 case TSK_ExplicitSpecialization: 11774 return Context.getTargetInfo().getCXXABI().isMicrosoft() && 11775 VD->isStaticDataMember() 11776 ? GVA_StrongODR 11777 : StrongLinkage; 11778 11779 case TSK_ExplicitInstantiationDefinition: 11780 return GVA_StrongODR; 11781 11782 case TSK_ExplicitInstantiationDeclaration: 11783 return GVA_AvailableExternally; 11784 11785 case TSK_ImplicitInstantiation: 11786 return GVA_DiscardableODR; 11787 } 11788 11789 llvm_unreachable("Invalid Linkage!"); 11790 } 11791 11792 GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) const { 11793 return adjustGVALinkageForExternalDefinitionKind(*this, VD, 11794 adjustGVALinkageForAttributes(*this, VD, 11795 basicGVALinkageForVariable(*this, VD))); 11796 } 11797 11798 bool ASTContext::DeclMustBeEmitted(const Decl *D) { 11799 if (const auto *VD = dyn_cast<VarDecl>(D)) { 11800 if (!VD->isFileVarDecl()) 11801 return false; 11802 // Global named register variables (GNU extension) are never emitted. 11803 if (VD->getStorageClass() == SC_Register) 11804 return false; 11805 if (VD->getDescribedVarTemplate() || 11806 isa<VarTemplatePartialSpecializationDecl>(VD)) 11807 return false; 11808 } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11809 // We never need to emit an uninstantiated function template. 11810 if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate) 11811 return false; 11812 } else if (isa<PragmaCommentDecl>(D)) 11813 return true; 11814 else if (isa<PragmaDetectMismatchDecl>(D)) 11815 return true; 11816 else if (isa<OMPRequiresDecl>(D)) 11817 return true; 11818 else if (isa<OMPThreadPrivateDecl>(D)) 11819 return !D->getDeclContext()->isDependentContext(); 11820 else if (isa<OMPAllocateDecl>(D)) 11821 return !D->getDeclContext()->isDependentContext(); 11822 else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D)) 11823 return !D->getDeclContext()->isDependentContext(); 11824 else if (isa<ImportDecl>(D)) 11825 return true; 11826 else 11827 return false; 11828 11829 // If this is a member of a class template, we do not need to emit it. 11830 if (D->getDeclContext()->isDependentContext()) 11831 return false; 11832 11833 // Weak references don't produce any output by themselves. 11834 if (D->hasAttr<WeakRefAttr>()) 11835 return false; 11836 11837 // Aliases and used decls are required. 11838 if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>()) 11839 return true; 11840 11841 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 11842 // Forward declarations aren't required. 
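    // For example, a bare prototype 'void f(void);' does not by itself require
    // emission; only properties of the declaration itself, checked below, can
    // force an externally visible definition.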
11843 if (!FD->doesThisDeclarationHaveABody()) 11844 return FD->doesDeclarationForceExternallyVisibleDefinition(); 11845 11846 // Constructors and destructors are required. 11847 if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>()) 11848 return true; 11849 11850 // The key function for a class is required. This rule only comes 11851 // into play when inline functions can be key functions, though. 11852 if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) { 11853 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) { 11854 const CXXRecordDecl *RD = MD->getParent(); 11855 if (MD->isOutOfLine() && RD->isDynamicClass()) { 11856 const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD); 11857 if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl()) 11858 return true; 11859 } 11860 } 11861 } 11862 11863 GVALinkage Linkage = GetGVALinkageForFunction(FD); 11864 11865 // static, static inline, always_inline, and extern inline functions can 11866 // always be deferred. Normal inline functions can be deferred in C99/C++. 11867 // Implicit template instantiations can also be deferred in C++. 11868 return !isDiscardableGVALinkage(Linkage); 11869 } 11870 11871 const auto *VD = cast<VarDecl>(D); 11872 assert(VD->isFileVarDecl() && "Expected file scoped var"); 11873 11874 // If the decl is marked as `declare target to`, it should be emitted for the 11875 // host and for the device. 11876 if (LangOpts.OpenMP && 11877 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) 11878 return true; 11879 11880 if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly && 11881 !isMSStaticDataMemberInlineDefinition(VD)) 11882 return false; 11883 11884 // Variables in other module units shouldn't be forced to be emitted. 11885 if (VD->isInAnotherModuleUnit()) 11886 return false; 11887 11888 // Variables that can be needed in other TUs are required. 11889 auto Linkage = GetGVALinkageForVariable(VD); 11890 if (!isDiscardableGVALinkage(Linkage)) 11891 return true; 11892 11893 // We never need to emit a variable that is available in another TU. 11894 if (Linkage == GVA_AvailableExternally) 11895 return false; 11896 11897 // Variables that have destruction with side-effects are required. 11898 if (VD->needsDestruction(*this)) 11899 return true; 11900 11901 // Variables that have initialization with side-effects are required. 11902 if (VD->getInit() && VD->getInit()->HasSideEffects(*this) && 11903 // We can get a value-dependent initializer during error recovery. 11904 (VD->getInit()->isValueDependent() || !VD->evaluateValue())) 11905 return true; 11906 11907 // Likewise, variables with tuple-like bindings are required if their 11908 // bindings have side-effects. 11909 if (const auto *DD = dyn_cast<DecompositionDecl>(VD)) 11910 for (const auto *BD : DD->bindings()) 11911 if (const auto *BindingVD = BD->getHoldingVar()) 11912 if (DeclMustBeEmitted(BindingVD)) 11913 return true; 11914 11915 return false; 11916 } 11917 11918 void ASTContext::forEachMultiversionedFunctionVersion( 11919 const FunctionDecl *FD, 11920 llvm::function_ref<void(FunctionDecl *)> Pred) const { 11921 assert(FD->isMultiVersion() && "Only valid for multiversioned functions"); 11922 llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls; 11923 FD = FD->getMostRecentDecl(); 11924 // FIXME: The order of traversal here matters and depends on the order of 11925 // lookup results, which happens to be (mostly) oldest-to-newest, but we 11926 // shouldn't rely on that. 
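  // For example, a function multiversioned via __attribute__((target("avx2")))
  // and __attribute__((target("default"))) has one FunctionDecl per version;
  // each is found through this lookup and handed to the callback.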
11927 for (auto *CurDecl : 11928 FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) { 11929 FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl(); 11930 if (CurFD && hasSameType(CurFD->getType(), FD->getType()) && 11931 !SeenDecls.contains(CurFD)) { 11932 SeenDecls.insert(CurFD); 11933 Pred(CurFD); 11934 } 11935 } 11936 } 11937 11938 CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic, 11939 bool IsCXXMethod, 11940 bool IsBuiltin) const { 11941 // Pass through to the C++ ABI object 11942 if (IsCXXMethod) 11943 return ABI->getDefaultMethodCallConv(IsVariadic); 11944 11945 // Builtins ignore user-specified default calling convention and remain the 11946 // Target's default calling convention. 11947 if (!IsBuiltin) { 11948 switch (LangOpts.getDefaultCallingConv()) { 11949 case LangOptions::DCC_None: 11950 break; 11951 case LangOptions::DCC_CDecl: 11952 return CC_C; 11953 case LangOptions::DCC_FastCall: 11954 if (getTargetInfo().hasFeature("sse2") && !IsVariadic) 11955 return CC_X86FastCall; 11956 break; 11957 case LangOptions::DCC_StdCall: 11958 if (!IsVariadic) 11959 return CC_X86StdCall; 11960 break; 11961 case LangOptions::DCC_VectorCall: 11962 // __vectorcall cannot be applied to variadic functions. 11963 if (!IsVariadic) 11964 return CC_X86VectorCall; 11965 break; 11966 case LangOptions::DCC_RegCall: 11967 // __regcall cannot be applied to variadic functions. 11968 if (!IsVariadic) 11969 return CC_X86RegCall; 11970 break; 11971 case LangOptions::DCC_RtdCall: 11972 if (!IsVariadic) 11973 return CC_M68kRTD; 11974 break; 11975 } 11976 } 11977 return Target->getDefaultCallingConv(); 11978 } 11979 11980 bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const { 11981 // Pass through to the C++ ABI object 11982 return ABI->isNearlyEmpty(RD); 11983 } 11984 11985 VTableContextBase *ASTContext::getVTableContext() { 11986 if (!VTContext.get()) { 11987 auto ABI = Target->getCXXABI(); 11988 if (ABI.isMicrosoft()) 11989 VTContext.reset(new MicrosoftVTableContext(*this)); 11990 else { 11991 auto ComponentLayout = getLangOpts().RelativeCXXABIVTables 11992 ? 
ItaniumVTableContext::Relative 11993 : ItaniumVTableContext::Pointer; 11994 VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout)); 11995 } 11996 } 11997 return VTContext.get(); 11998 } 11999 12000 MangleContext *ASTContext::createMangleContext(const TargetInfo *T) { 12001 if (!T) 12002 T = Target; 12003 switch (T->getCXXABI().getKind()) { 12004 case TargetCXXABI::AppleARM64: 12005 case TargetCXXABI::Fuchsia: 12006 case TargetCXXABI::GenericAArch64: 12007 case TargetCXXABI::GenericItanium: 12008 case TargetCXXABI::GenericARM: 12009 case TargetCXXABI::GenericMIPS: 12010 case TargetCXXABI::iOS: 12011 case TargetCXXABI::WebAssembly: 12012 case TargetCXXABI::WatchOS: 12013 case TargetCXXABI::XL: 12014 return ItaniumMangleContext::create(*this, getDiagnostics()); 12015 case TargetCXXABI::Microsoft: 12016 return MicrosoftMangleContext::create(*this, getDiagnostics()); 12017 } 12018 llvm_unreachable("Unsupported ABI"); 12019 } 12020 12021 MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) { 12022 assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft && 12023 "Device mangle context does not support Microsoft mangling."); 12024 switch (T.getCXXABI().getKind()) { 12025 case TargetCXXABI::AppleARM64: 12026 case TargetCXXABI::Fuchsia: 12027 case TargetCXXABI::GenericAArch64: 12028 case TargetCXXABI::GenericItanium: 12029 case TargetCXXABI::GenericARM: 12030 case TargetCXXABI::GenericMIPS: 12031 case TargetCXXABI::iOS: 12032 case TargetCXXABI::WebAssembly: 12033 case TargetCXXABI::WatchOS: 12034 case TargetCXXABI::XL: 12035 return ItaniumMangleContext::create( 12036 *this, getDiagnostics(), 12037 [](ASTContext &, const NamedDecl *ND) -> std::optional<unsigned> { 12038 if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) 12039 return RD->getDeviceLambdaManglingNumber(); 12040 return std::nullopt; 12041 }, 12042 /*IsAux=*/true); 12043 case TargetCXXABI::Microsoft: 12044 return MicrosoftMangleContext::create(*this, getDiagnostics(), 12045 /*IsAux=*/true); 12046 } 12047 llvm_unreachable("Unsupported ABI"); 12048 } 12049 12050 CXXABI::~CXXABI() = default; 12051 12052 size_t ASTContext::getSideTableAllocatedMemory() const { 12053 return ASTRecordLayouts.getMemorySize() + 12054 llvm::capacity_in_bytes(ObjCLayouts) + 12055 llvm::capacity_in_bytes(KeyFunctions) + 12056 llvm::capacity_in_bytes(ObjCImpls) + 12057 llvm::capacity_in_bytes(BlockVarCopyInits) + 12058 llvm::capacity_in_bytes(DeclAttrs) + 12059 llvm::capacity_in_bytes(TemplateOrInstantiation) + 12060 llvm::capacity_in_bytes(InstantiatedFromUsingDecl) + 12061 llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) + 12062 llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) + 12063 llvm::capacity_in_bytes(OverriddenMethods) + 12064 llvm::capacity_in_bytes(Types) + 12065 llvm::capacity_in_bytes(VariableArrayTypes); 12066 } 12067 12068 /// getIntTypeForBitwidth - 12069 /// sets integer QualTy according to specified details: 12070 /// bitwidth, signed/unsigned. 12071 /// Returns empty type if there is no appropriate target types. 12072 QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth, 12073 unsigned Signed) const { 12074 TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed); 12075 CanQualType QualTy = getFromTargetType(Ty); 12076 if (!QualTy && DestWidth == 128) 12077 return Signed ? Int128Ty : UnsignedInt128Ty; 12078 return QualTy; 12079 } 12080 12081 /// getRealTypeForBitwidth - 12082 /// sets floating point QualTy according to specified bitwidth. 
12083 /// Returns empty type if there is no appropriate target types. 12084 QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth, 12085 FloatModeKind ExplicitType) const { 12086 FloatModeKind Ty = 12087 getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType); 12088 switch (Ty) { 12089 case FloatModeKind::Half: 12090 return HalfTy; 12091 case FloatModeKind::Float: 12092 return FloatTy; 12093 case FloatModeKind::Double: 12094 return DoubleTy; 12095 case FloatModeKind::LongDouble: 12096 return LongDoubleTy; 12097 case FloatModeKind::Float128: 12098 return Float128Ty; 12099 case FloatModeKind::Ibm128: 12100 return Ibm128Ty; 12101 case FloatModeKind::NoFloat: 12102 return {}; 12103 } 12104 12105 llvm_unreachable("Unhandled TargetInfo::RealType value"); 12106 } 12107 12108 void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) { 12109 if (Number > 1) 12110 MangleNumbers[ND] = Number; 12111 } 12112 12113 unsigned ASTContext::getManglingNumber(const NamedDecl *ND, 12114 bool ForAuxTarget) const { 12115 auto I = MangleNumbers.find(ND); 12116 unsigned Res = I != MangleNumbers.end() ? I->second : 1; 12117 // CUDA/HIP host compilation encodes host and device mangling numbers 12118 // as lower and upper half of 32 bit integer. 12119 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) { 12120 Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF; 12121 } else { 12122 assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling " 12123 "number for aux target"); 12124 } 12125 return Res > 1 ? Res : 1; 12126 } 12127 12128 void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) { 12129 if (Number > 1) 12130 StaticLocalNumbers[VD] = Number; 12131 } 12132 12133 unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const { 12134 auto I = StaticLocalNumbers.find(VD); 12135 return I != StaticLocalNumbers.end() ? I->second : 1; 12136 } 12137 12138 MangleNumberingContext & 12139 ASTContext::getManglingNumberContext(const DeclContext *DC) { 12140 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 12141 std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC]; 12142 if (!MCtx) 12143 MCtx = createMangleNumberingContext(); 12144 return *MCtx; 12145 } 12146 12147 MangleNumberingContext & 12148 ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) { 12149 assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C. 
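  // This variant keys the numbering context off a specific declaration, e.g.
  // so that lambdas appearing in that declaration's initializer receive
  // stable, distinct mangling numbers.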
12150 std::unique_ptr<MangleNumberingContext> &MCtx = 12151 ExtraMangleNumberingContexts[D]; 12152 if (!MCtx) 12153 MCtx = createMangleNumberingContext(); 12154 return *MCtx; 12155 } 12156 12157 std::unique_ptr<MangleNumberingContext> 12158 ASTContext::createMangleNumberingContext() const { 12159 return ABI->createMangleNumberingContext(); 12160 } 12161 12162 const CXXConstructorDecl * 12163 ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) { 12164 return ABI->getCopyConstructorForExceptionObject( 12165 cast<CXXRecordDecl>(RD->getFirstDecl())); 12166 } 12167 12168 void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD, 12169 CXXConstructorDecl *CD) { 12170 return ABI->addCopyConstructorForExceptionObject( 12171 cast<CXXRecordDecl>(RD->getFirstDecl()), 12172 cast<CXXConstructorDecl>(CD->getFirstDecl())); 12173 } 12174 12175 void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD, 12176 TypedefNameDecl *DD) { 12177 return ABI->addTypedefNameForUnnamedTagDecl(TD, DD); 12178 } 12179 12180 TypedefNameDecl * 12181 ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) { 12182 return ABI->getTypedefNameForUnnamedTagDecl(TD); 12183 } 12184 12185 void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD, 12186 DeclaratorDecl *DD) { 12187 return ABI->addDeclaratorForUnnamedTagDecl(TD, DD); 12188 } 12189 12190 DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) { 12191 return ABI->getDeclaratorForUnnamedTagDecl(TD); 12192 } 12193 12194 void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) { 12195 ParamIndices[D] = index; 12196 } 12197 12198 unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const { 12199 ParameterIndexTable::const_iterator I = ParamIndices.find(D); 12200 assert(I != ParamIndices.end() && 12201 "ParmIndices lacks entry set by ParmVarDecl"); 12202 return I->second; 12203 } 12204 12205 QualType ASTContext::getStringLiteralArrayType(QualType EltTy, 12206 unsigned Length) const { 12207 // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). 12208 if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) 12209 EltTy = EltTy.withConst(); 12210 12211 EltTy = adjustStringLiteralBaseType(EltTy); 12212 12213 // Get an array type for the string, according to C99 6.4.5. This includes 12214 // the null terminator character. 
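  // For example, the literal "hi" has Length == 2 and becomes 'const char[3]'
  // in C++, the extra element holding the terminating null character.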
12215 return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr, 12216 ArraySizeModifier::Normal, /*IndexTypeQuals*/ 0); 12217 } 12218 12219 StringLiteral * 12220 ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const { 12221 StringLiteral *&Result = StringLiteralCache[Key]; 12222 if (!Result) 12223 Result = StringLiteral::Create( 12224 *this, Key, StringLiteralKind::Ordinary, 12225 /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()), 12226 SourceLocation()); 12227 return Result; 12228 } 12229 12230 MSGuidDecl * 12231 ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const { 12232 assert(MSGuidTagDecl && "building MS GUID without MS extensions?"); 12233 12234 llvm::FoldingSetNodeID ID; 12235 MSGuidDecl::Profile(ID, Parts); 12236 12237 void *InsertPos; 12238 if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos)) 12239 return Existing; 12240 12241 QualType GUIDType = getMSGuidType().withConst(); 12242 MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts); 12243 MSGuidDecls.InsertNode(New, InsertPos); 12244 return New; 12245 } 12246 12247 UnnamedGlobalConstantDecl * 12248 ASTContext::getUnnamedGlobalConstantDecl(QualType Ty, 12249 const APValue &APVal) const { 12250 llvm::FoldingSetNodeID ID; 12251 UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal); 12252 12253 void *InsertPos; 12254 if (UnnamedGlobalConstantDecl *Existing = 12255 UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos)) 12256 return Existing; 12257 12258 UnnamedGlobalConstantDecl *New = 12259 UnnamedGlobalConstantDecl::Create(*this, Ty, APVal); 12260 UnnamedGlobalConstantDecls.InsertNode(New, InsertPos); 12261 return New; 12262 } 12263 12264 TemplateParamObjectDecl * 12265 ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const { 12266 assert(T->isRecordType() && "template param object of unexpected type"); 12267 12268 // C++ [temp.param]p8: 12269 // [...] a static storage duration object of type 'const T' [...] 12270 T.addConst(); 12271 12272 llvm::FoldingSetNodeID ID; 12273 TemplateParamObjectDecl::Profile(ID, T, V); 12274 12275 void *InsertPos; 12276 if (TemplateParamObjectDecl *Existing = 12277 TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos)) 12278 return Existing; 12279 12280 TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V); 12281 TemplateParamObjectDecls.InsertNode(New, InsertPos); 12282 return New; 12283 } 12284 12285 bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const { 12286 const llvm::Triple &T = getTargetInfo().getTriple(); 12287 if (!T.isOSDarwin()) 12288 return false; 12289 12290 if (!(T.isiOS() && T.isOSVersionLT(7)) && 12291 !(T.isMacOSX() && T.isOSVersionLT(10, 9))) 12292 return false; 12293 12294 QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); 12295 CharUnits sizeChars = getTypeSizeInChars(AtomicTy); 12296 uint64_t Size = sizeChars.getQuantity(); 12297 CharUnits alignChars = getTypeAlignInChars(AtomicTy); 12298 unsigned Align = alignChars.getQuantity(); 12299 unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth(); 12300 return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits); 12301 } 12302 12303 bool 12304 ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl, 12305 const ObjCMethodDecl *MethodImpl) { 12306 // No point trying to match an unavailable/deprecated mothod. 
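  // For example, a declaration '- (void)run:(int)x;' matched against an
  // implementation '- (void)run:(float)x;' fails the parameter type comparison
  // below, so the methods are reported as unequal.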
12307 if (MethodDecl->hasAttr<UnavailableAttr>() 12308 || MethodDecl->hasAttr<DeprecatedAttr>()) 12309 return false; 12310 if (MethodDecl->getObjCDeclQualifier() != 12311 MethodImpl->getObjCDeclQualifier()) 12312 return false; 12313 if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType())) 12314 return false; 12315 12316 if (MethodDecl->param_size() != MethodImpl->param_size()) 12317 return false; 12318 12319 for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(), 12320 IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(), 12321 EF = MethodDecl->param_end(); 12322 IM != EM && IF != EF; ++IM, ++IF) { 12323 const ParmVarDecl *DeclVar = (*IF); 12324 const ParmVarDecl *ImplVar = (*IM); 12325 if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier()) 12326 return false; 12327 if (!hasSameType(DeclVar->getType(), ImplVar->getType())) 12328 return false; 12329 } 12330 12331 return (MethodDecl->isVariadic() == MethodImpl->isVariadic()); 12332 } 12333 12334 uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const { 12335 LangAS AS; 12336 if (QT->getUnqualifiedDesugaredType()->isNullPtrType()) 12337 AS = LangAS::Default; 12338 else 12339 AS = QT->getPointeeType().getAddressSpace(); 12340 12341 return getTargetInfo().getNullPointerValue(AS); 12342 } 12343 12344 unsigned ASTContext::getTargetAddressSpace(LangAS AS) const { 12345 return getTargetInfo().getTargetAddressSpace(AS); 12346 } 12347 12348 bool ASTContext::hasSameExpr(const Expr *X, const Expr *Y) const { 12349 if (X == Y) 12350 return true; 12351 if (!X || !Y) 12352 return false; 12353 llvm::FoldingSetNodeID IDX, IDY; 12354 X->Profile(IDX, *this, /*Canonical=*/true); 12355 Y->Profile(IDY, *this, /*Canonical=*/true); 12356 return IDX == IDY; 12357 } 12358 12359 // The getCommon* helpers return, for given 'same' X and Y entities given as 12360 // inputs, another entity which is also the 'same' as the inputs, but which 12361 // is closer to the canonical form of the inputs, each according to a given 12362 // criteria. 12363 // The getCommon*Checked variants are 'null inputs not-allowed' equivalents of 12364 // the regular ones. 12365 12366 static Decl *getCommonDecl(Decl *X, Decl *Y) { 12367 if (!declaresSameEntity(X, Y)) 12368 return nullptr; 12369 for (const Decl *DX : X->redecls()) { 12370 // If we reach Y before reaching the first decl, that means X is older. 12371 if (DX == Y) 12372 return X; 12373 // If we reach the first decl, then Y is older. 12374 if (DX->isFirstDecl()) 12375 return Y; 12376 } 12377 llvm_unreachable("Corrupt redecls chain"); 12378 } 12379 12380 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12381 static T *getCommonDecl(T *X, T *Y) { 12382 return cast_or_null<T>( 12383 getCommonDecl(const_cast<Decl *>(cast_or_null<Decl>(X)), 12384 const_cast<Decl *>(cast_or_null<Decl>(Y)))); 12385 } 12386 12387 template <class T, std::enable_if_t<std::is_base_of_v<Decl, T>, bool> = true> 12388 static T *getCommonDeclChecked(T *X, T *Y) { 12389 return cast<T>(getCommonDecl(const_cast<Decl *>(cast<Decl>(X)), 12390 const_cast<Decl *>(cast<Decl>(Y)))); 12391 } 12392 12393 static TemplateName getCommonTemplateName(ASTContext &Ctx, TemplateName X, 12394 TemplateName Y) { 12395 if (X.getAsVoidPointer() == Y.getAsVoidPointer()) 12396 return X; 12397 // FIXME: There are cases here where we could find a common template name 12398 // with more sugar. For example one could be a SubstTemplateTemplate* 12399 // replacing the other. 
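  // Sketch of the logic below: if X and Y spell the same underlying template
  // differently (for instance, one of them through a qualified name), their
  // canonical template names coincide and that canonical name is the common
  // result; if they canonicalize to different templates, a null TemplateName
  // signals that no common name exists.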
12400 TemplateName CX = Ctx.getCanonicalTemplateName(X); 12401 if (CX.getAsVoidPointer() != 12402 Ctx.getCanonicalTemplateName(Y).getAsVoidPointer()) 12403 return TemplateName(); 12404 return CX; 12405 } 12406 12407 static TemplateName 12408 getCommonTemplateNameChecked(ASTContext &Ctx, TemplateName X, TemplateName Y) { 12409 TemplateName R = getCommonTemplateName(Ctx, X, Y); 12410 assert(R.getAsVoidPointer() != nullptr); 12411 return R; 12412 } 12413 12414 static auto getCommonTypes(ASTContext &Ctx, ArrayRef<QualType> Xs, 12415 ArrayRef<QualType> Ys, bool Unqualified = false) { 12416 assert(Xs.size() == Ys.size()); 12417 SmallVector<QualType, 8> Rs(Xs.size()); 12418 for (size_t I = 0; I < Rs.size(); ++I) 12419 Rs[I] = Ctx.getCommonSugaredType(Xs[I], Ys[I], Unqualified); 12420 return Rs; 12421 } 12422 12423 template <class T> 12424 static SourceLocation getCommonAttrLoc(const T *X, const T *Y) { 12425 return X->getAttributeLoc() == Y->getAttributeLoc() ? X->getAttributeLoc() 12426 : SourceLocation(); 12427 } 12428 12429 static TemplateArgument getCommonTemplateArgument(ASTContext &Ctx, 12430 const TemplateArgument &X, 12431 const TemplateArgument &Y) { 12432 if (X.getKind() != Y.getKind()) 12433 return TemplateArgument(); 12434 12435 switch (X.getKind()) { 12436 case TemplateArgument::ArgKind::Type: 12437 if (!Ctx.hasSameType(X.getAsType(), Y.getAsType())) 12438 return TemplateArgument(); 12439 return TemplateArgument( 12440 Ctx.getCommonSugaredType(X.getAsType(), Y.getAsType())); 12441 case TemplateArgument::ArgKind::NullPtr: 12442 if (!Ctx.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) 12443 return TemplateArgument(); 12444 return TemplateArgument( 12445 Ctx.getCommonSugaredType(X.getNullPtrType(), Y.getNullPtrType()), 12446 /*Unqualified=*/true); 12447 case TemplateArgument::ArgKind::Expression: 12448 if (!Ctx.hasSameType(X.getAsExpr()->getType(), Y.getAsExpr()->getType())) 12449 return TemplateArgument(); 12450 // FIXME: Try to keep the common sugar. 12451 return X; 12452 case TemplateArgument::ArgKind::Template: { 12453 TemplateName TX = X.getAsTemplate(), TY = Y.getAsTemplate(); 12454 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12455 if (!CTN.getAsVoidPointer()) 12456 return TemplateArgument(); 12457 return TemplateArgument(CTN); 12458 } 12459 case TemplateArgument::ArgKind::TemplateExpansion: { 12460 TemplateName TX = X.getAsTemplateOrTemplatePattern(), 12461 TY = Y.getAsTemplateOrTemplatePattern(); 12462 TemplateName CTN = ::getCommonTemplateName(Ctx, TX, TY); 12463 if (!CTN.getAsVoidPointer()) 12464 return TemplateName(); 12465 auto NExpX = X.getNumTemplateExpansions(); 12466 assert(NExpX == Y.getNumTemplateExpansions()); 12467 return TemplateArgument(CTN, NExpX); 12468 } 12469 default: 12470 // FIXME: Handle the other argument kinds. 
12471 return X; 12472 } 12473 } 12474 12475 static bool getCommonTemplateArguments(ASTContext &Ctx, 12476 SmallVectorImpl<TemplateArgument> &R, 12477 ArrayRef<TemplateArgument> Xs, 12478 ArrayRef<TemplateArgument> Ys) { 12479 if (Xs.size() != Ys.size()) 12480 return true; 12481 R.resize(Xs.size()); 12482 for (size_t I = 0; I < R.size(); ++I) { 12483 R[I] = getCommonTemplateArgument(Ctx, Xs[I], Ys[I]); 12484 if (R[I].isNull()) 12485 return true; 12486 } 12487 return false; 12488 } 12489 12490 static auto getCommonTemplateArguments(ASTContext &Ctx, 12491 ArrayRef<TemplateArgument> Xs, 12492 ArrayRef<TemplateArgument> Ys) { 12493 SmallVector<TemplateArgument, 8> R; 12494 bool Different = getCommonTemplateArguments(Ctx, R, Xs, Ys); 12495 assert(!Different); 12496 (void)Different; 12497 return R; 12498 } 12499 12500 template <class T> 12501 static ElaboratedTypeKeyword getCommonTypeKeyword(const T *X, const T *Y) { 12502 return X->getKeyword() == Y->getKeyword() ? X->getKeyword() 12503 : ElaboratedTypeKeyword::None; 12504 } 12505 12506 template <class T> 12507 static NestedNameSpecifier *getCommonNNS(ASTContext &Ctx, const T *X, 12508 const T *Y) { 12509 // FIXME: Try to keep the common NNS sugar. 12510 return X->getQualifier() == Y->getQualifier() 12511 ? X->getQualifier() 12512 : Ctx.getCanonicalNestedNameSpecifier(X->getQualifier()); 12513 } 12514 12515 template <class T> 12516 static QualType getCommonElementType(ASTContext &Ctx, const T *X, const T *Y) { 12517 return Ctx.getCommonSugaredType(X->getElementType(), Y->getElementType()); 12518 } 12519 12520 template <class T> 12521 static QualType getCommonArrayElementType(ASTContext &Ctx, const T *X, 12522 Qualifiers &QX, const T *Y, 12523 Qualifiers &QY) { 12524 QualType EX = X->getElementType(), EY = Y->getElementType(); 12525 QualType R = Ctx.getCommonSugaredType(EX, EY, 12526 /*Unqualified=*/true); 12527 Qualifiers RQ = R.getQualifiers(); 12528 QX += EX.getQualifiers() - RQ; 12529 QY += EY.getQualifiers() - RQ; 12530 return R; 12531 } 12532 12533 template <class T> 12534 static QualType getCommonPointeeType(ASTContext &Ctx, const T *X, const T *Y) { 12535 return Ctx.getCommonSugaredType(X->getPointeeType(), Y->getPointeeType()); 12536 } 12537 12538 template <class T> static auto *getCommonSizeExpr(ASTContext &Ctx, T *X, T *Y) { 12539 assert(Ctx.hasSameExpr(X->getSizeExpr(), Y->getSizeExpr())); 12540 return X->getSizeExpr(); 12541 } 12542 12543 static auto getCommonSizeModifier(const ArrayType *X, const ArrayType *Y) { 12544 assert(X->getSizeModifier() == Y->getSizeModifier()); 12545 return X->getSizeModifier(); 12546 } 12547 12548 static auto getCommonIndexTypeCVRQualifiers(const ArrayType *X, 12549 const ArrayType *Y) { 12550 assert(X->getIndexTypeCVRQualifiers() == Y->getIndexTypeCVRQualifiers()); 12551 return X->getIndexTypeCVRQualifiers(); 12552 } 12553 12554 // Merges two type lists such that the resulting vector will contain 12555 // each type (in a canonical sense) only once, in the order they appear 12556 // from X to Y. If they occur in both X and Y, the result will contain 12557 // the common sugared type between them. 
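// Illustrative example: merging X = {int, A} with Y = {B, float}, where A and
// B are distinct typedefs of 'double', yields {int, double, float}; the A/B
// slot holds their common sugared type, which here reduces to the shared
// underlying 'double'.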
12558 static void mergeTypeLists(ASTContext &Ctx, SmallVectorImpl<QualType> &Out, 12559 ArrayRef<QualType> X, ArrayRef<QualType> Y) { 12560 llvm::DenseMap<QualType, unsigned> Found; 12561 for (auto Ts : {X, Y}) { 12562 for (QualType T : Ts) { 12563 auto Res = Found.try_emplace(Ctx.getCanonicalType(T), Out.size()); 12564 if (!Res.second) { 12565 QualType &U = Out[Res.first->second]; 12566 U = Ctx.getCommonSugaredType(U, T); 12567 } else { 12568 Out.emplace_back(T); 12569 } 12570 } 12571 } 12572 } 12573 12574 FunctionProtoType::ExceptionSpecInfo 12575 ASTContext::mergeExceptionSpecs(FunctionProtoType::ExceptionSpecInfo ESI1, 12576 FunctionProtoType::ExceptionSpecInfo ESI2, 12577 SmallVectorImpl<QualType> &ExceptionTypeStorage, 12578 bool AcceptDependent) { 12579 ExceptionSpecificationType EST1 = ESI1.Type, EST2 = ESI2.Type; 12580 12581 // If either of them can throw anything, that is the result. 12582 for (auto I : {EST_None, EST_MSAny, EST_NoexceptFalse}) { 12583 if (EST1 == I) 12584 return ESI1; 12585 if (EST2 == I) 12586 return ESI2; 12587 } 12588 12589 // If either of them is non-throwing, the result is the other. 12590 for (auto I : 12591 {EST_NoThrow, EST_DynamicNone, EST_BasicNoexcept, EST_NoexceptTrue}) { 12592 if (EST1 == I) 12593 return ESI2; 12594 if (EST2 == I) 12595 return ESI1; 12596 } 12597 12598 // If we're left with value-dependent computed noexcept expressions, we're 12599 // stuck. Before C++17, we can just drop the exception specification entirely, 12600 // since it's not actually part of the canonical type. And this should never 12601 // happen in C++17, because it would mean we were computing the composite 12602 // pointer type of dependent types, which should never happen. 12603 if (EST1 == EST_DependentNoexcept || EST2 == EST_DependentNoexcept) { 12604 assert(AcceptDependent && 12605 "computing composite pointer type of dependent types"); 12606 return FunctionProtoType::ExceptionSpecInfo(); 12607 } 12608 12609 // Switch over the possibilities so that people adding new values know to 12610 // update this function. 12611 switch (EST1) { 12612 case EST_None: 12613 case EST_DynamicNone: 12614 case EST_MSAny: 12615 case EST_BasicNoexcept: 12616 case EST_DependentNoexcept: 12617 case EST_NoexceptFalse: 12618 case EST_NoexceptTrue: 12619 case EST_NoThrow: 12620 llvm_unreachable("These ESTs should be handled above"); 12621 12622 case EST_Dynamic: { 12623 // This is the fun case: both exception specifications are dynamic. Form 12624 // the union of the two lists. 
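    // Illustrative example: merging 'throw(A, B)' with 'throw(B, C)' yields a
    // dynamic specification listing {A, B, C}; each exception type appears
    // once, in order of first appearance across the two lists.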
12625 assert(EST2 == EST_Dynamic && "other cases should already be handled"); 12626 mergeTypeLists(*this, ExceptionTypeStorage, ESI1.Exceptions, 12627 ESI2.Exceptions); 12628 FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic); 12629 Result.Exceptions = ExceptionTypeStorage; 12630 return Result; 12631 } 12632 12633 case EST_Unevaluated: 12634 case EST_Uninstantiated: 12635 case EST_Unparsed: 12636 llvm_unreachable("shouldn't see unresolved exception specifications here"); 12637 } 12638 12639 llvm_unreachable("invalid ExceptionSpecificationType"); 12640 } 12641 12642 static QualType getCommonNonSugarTypeNode(ASTContext &Ctx, const Type *X, 12643 Qualifiers &QX, const Type *Y, 12644 Qualifiers &QY) { 12645 Type::TypeClass TC = X->getTypeClass(); 12646 assert(TC == Y->getTypeClass()); 12647 switch (TC) { 12648 #define UNEXPECTED_TYPE(Class, Kind) \ 12649 case Type::Class: \ 12650 llvm_unreachable("Unexpected " Kind ": " #Class); 12651 12652 #define NON_CANONICAL_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "non-canonical") 12653 #define TYPE(Class, Base) 12654 #include "clang/AST/TypeNodes.inc" 12655 12656 #define SUGAR_FREE_TYPE(Class) UNEXPECTED_TYPE(Class, "sugar-free") 12657 SUGAR_FREE_TYPE(Builtin) 12658 SUGAR_FREE_TYPE(DeducedTemplateSpecialization) 12659 SUGAR_FREE_TYPE(DependentBitInt) 12660 SUGAR_FREE_TYPE(Enum) 12661 SUGAR_FREE_TYPE(BitInt) 12662 SUGAR_FREE_TYPE(ObjCInterface) 12663 SUGAR_FREE_TYPE(Record) 12664 SUGAR_FREE_TYPE(SubstTemplateTypeParmPack) 12665 SUGAR_FREE_TYPE(UnresolvedUsing) 12666 #undef SUGAR_FREE_TYPE 12667 #define NON_UNIQUE_TYPE(Class) UNEXPECTED_TYPE(Class, "non-unique") 12668 NON_UNIQUE_TYPE(TypeOfExpr) 12669 NON_UNIQUE_TYPE(VariableArray) 12670 #undef NON_UNIQUE_TYPE 12671 12672 UNEXPECTED_TYPE(TypeOf, "sugar") 12673 12674 #undef UNEXPECTED_TYPE 12675 12676 case Type::Auto: { 12677 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 12678 assert(AX->getDeducedType().isNull()); 12679 assert(AY->getDeducedType().isNull()); 12680 assert(AX->getKeyword() == AY->getKeyword()); 12681 assert(AX->isInstantiationDependentType() == 12682 AY->isInstantiationDependentType()); 12683 auto As = getCommonTemplateArguments(Ctx, AX->getTypeConstraintArguments(), 12684 AY->getTypeConstraintArguments()); 12685 return Ctx.getAutoType(QualType(), AX->getKeyword(), 12686 AX->isInstantiationDependentType(), 12687 AX->containsUnexpandedParameterPack(), 12688 getCommonDeclChecked(AX->getTypeConstraintConcept(), 12689 AY->getTypeConstraintConcept()), 12690 As); 12691 } 12692 case Type::IncompleteArray: { 12693 const auto *AX = cast<IncompleteArrayType>(X), 12694 *AY = cast<IncompleteArrayType>(Y); 12695 return Ctx.getIncompleteArrayType( 12696 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12697 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12698 } 12699 case Type::DependentSizedArray: { 12700 const auto *AX = cast<DependentSizedArrayType>(X), 12701 *AY = cast<DependentSizedArrayType>(Y); 12702 return Ctx.getDependentSizedArrayType( 12703 getCommonArrayElementType(Ctx, AX, QX, AY, QY), 12704 getCommonSizeExpr(Ctx, AX, AY), getCommonSizeModifier(AX, AY), 12705 getCommonIndexTypeCVRQualifiers(AX, AY), 12706 AX->getBracketsRange() == AY->getBracketsRange() 12707 ? 
AX->getBracketsRange() 12708 : SourceRange()); 12709 } 12710 case Type::ConstantArray: { 12711 const auto *AX = cast<ConstantArrayType>(X), 12712 *AY = cast<ConstantArrayType>(Y); 12713 assert(AX->getSize() == AY->getSize()); 12714 const Expr *SizeExpr = Ctx.hasSameExpr(AX->getSizeExpr(), AY->getSizeExpr()) 12715 ? AX->getSizeExpr() 12716 : nullptr; 12717 return Ctx.getConstantArrayType( 12718 getCommonArrayElementType(Ctx, AX, QX, AY, QY), AX->getSize(), SizeExpr, 12719 getCommonSizeModifier(AX, AY), getCommonIndexTypeCVRQualifiers(AX, AY)); 12720 } 12721 case Type::Atomic: { 12722 const auto *AX = cast<AtomicType>(X), *AY = cast<AtomicType>(Y); 12723 return Ctx.getAtomicType( 12724 Ctx.getCommonSugaredType(AX->getValueType(), AY->getValueType())); 12725 } 12726 case Type::Complex: { 12727 const auto *CX = cast<ComplexType>(X), *CY = cast<ComplexType>(Y); 12728 return Ctx.getComplexType(getCommonArrayElementType(Ctx, CX, QX, CY, QY)); 12729 } 12730 case Type::Pointer: { 12731 const auto *PX = cast<PointerType>(X), *PY = cast<PointerType>(Y); 12732 return Ctx.getPointerType(getCommonPointeeType(Ctx, PX, PY)); 12733 } 12734 case Type::BlockPointer: { 12735 const auto *PX = cast<BlockPointerType>(X), *PY = cast<BlockPointerType>(Y); 12736 return Ctx.getBlockPointerType(getCommonPointeeType(Ctx, PX, PY)); 12737 } 12738 case Type::ObjCObjectPointer: { 12739 const auto *PX = cast<ObjCObjectPointerType>(X), 12740 *PY = cast<ObjCObjectPointerType>(Y); 12741 return Ctx.getObjCObjectPointerType(getCommonPointeeType(Ctx, PX, PY)); 12742 } 12743 case Type::MemberPointer: { 12744 const auto *PX = cast<MemberPointerType>(X), 12745 *PY = cast<MemberPointerType>(Y); 12746 return Ctx.getMemberPointerType( 12747 getCommonPointeeType(Ctx, PX, PY), 12748 Ctx.getCommonSugaredType(QualType(PX->getClass(), 0), 12749 QualType(PY->getClass(), 0)) 12750 .getTypePtr()); 12751 } 12752 case Type::LValueReference: { 12753 const auto *PX = cast<LValueReferenceType>(X), 12754 *PY = cast<LValueReferenceType>(Y); 12755 // FIXME: Preserve PointeeTypeAsWritten. 12756 return Ctx.getLValueReferenceType(getCommonPointeeType(Ctx, PX, PY), 12757 PX->isSpelledAsLValue() || 12758 PY->isSpelledAsLValue()); 12759 } 12760 case Type::RValueReference: { 12761 const auto *PX = cast<RValueReferenceType>(X), 12762 *PY = cast<RValueReferenceType>(Y); 12763 // FIXME: Preserve PointeeTypeAsWritten. 
12764 return Ctx.getRValueReferenceType(getCommonPointeeType(Ctx, PX, PY)); 12765 } 12766 case Type::DependentAddressSpace: { 12767 const auto *PX = cast<DependentAddressSpaceType>(X), 12768 *PY = cast<DependentAddressSpaceType>(Y); 12769 assert(Ctx.hasSameExpr(PX->getAddrSpaceExpr(), PY->getAddrSpaceExpr())); 12770 return Ctx.getDependentAddressSpaceType(getCommonPointeeType(Ctx, PX, PY), 12771 PX->getAddrSpaceExpr(), 12772 getCommonAttrLoc(PX, PY)); 12773 } 12774 case Type::FunctionNoProto: { 12775 const auto *FX = cast<FunctionNoProtoType>(X), 12776 *FY = cast<FunctionNoProtoType>(Y); 12777 assert(FX->getExtInfo() == FY->getExtInfo()); 12778 return Ctx.getFunctionNoProtoType( 12779 Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()), 12780 FX->getExtInfo()); 12781 } 12782 case Type::FunctionProto: { 12783 const auto *FX = cast<FunctionProtoType>(X), 12784 *FY = cast<FunctionProtoType>(Y); 12785 FunctionProtoType::ExtProtoInfo EPIX = FX->getExtProtoInfo(), 12786 EPIY = FY->getExtProtoInfo(); 12787 assert(EPIX.ExtInfo == EPIY.ExtInfo); 12788 assert(EPIX.ExtParameterInfos == EPIY.ExtParameterInfos); 12789 assert(EPIX.RefQualifier == EPIY.RefQualifier); 12790 assert(EPIX.TypeQuals == EPIY.TypeQuals); 12791 assert(EPIX.Variadic == EPIY.Variadic); 12792 12793 // FIXME: Can we handle an empty EllipsisLoc? 12794 // Use empty EllipsisLoc if X and Y differ. 12795 12796 EPIX.HasTrailingReturn = EPIX.HasTrailingReturn && EPIY.HasTrailingReturn; 12797 12798 QualType R = 12799 Ctx.getCommonSugaredType(FX->getReturnType(), FY->getReturnType()); 12800 auto P = getCommonTypes(Ctx, FX->param_types(), FY->param_types(), 12801 /*Unqualified=*/true); 12802 12803 SmallVector<QualType, 8> Exceptions; 12804 EPIX.ExceptionSpec = Ctx.mergeExceptionSpecs( 12805 EPIX.ExceptionSpec, EPIY.ExceptionSpec, Exceptions, true); 12806 return Ctx.getFunctionType(R, P, EPIX); 12807 } 12808 case Type::ObjCObject: { 12809 const auto *OX = cast<ObjCObjectType>(X), *OY = cast<ObjCObjectType>(Y); 12810 assert( 12811 std::equal(OX->getProtocols().begin(), OX->getProtocols().end(), 12812 OY->getProtocols().begin(), OY->getProtocols().end(), 12813 [](const ObjCProtocolDecl *P0, const ObjCProtocolDecl *P1) { 12814 return P0->getCanonicalDecl() == P1->getCanonicalDecl(); 12815 }) && 12816 "protocol lists must be the same"); 12817 auto TAs = getCommonTypes(Ctx, OX->getTypeArgsAsWritten(), 12818 OY->getTypeArgsAsWritten()); 12819 return Ctx.getObjCObjectType( 12820 Ctx.getCommonSugaredType(OX->getBaseType(), OY->getBaseType()), TAs, 12821 OX->getProtocols(), 12822 OX->isKindOfTypeAsWritten() && OY->isKindOfTypeAsWritten()); 12823 } 12824 case Type::ConstantMatrix: { 12825 const auto *MX = cast<ConstantMatrixType>(X), 12826 *MY = cast<ConstantMatrixType>(Y); 12827 assert(MX->getNumRows() == MY->getNumRows()); 12828 assert(MX->getNumColumns() == MY->getNumColumns()); 12829 return Ctx.getConstantMatrixType(getCommonElementType(Ctx, MX, MY), 12830 MX->getNumRows(), MX->getNumColumns()); 12831 } 12832 case Type::DependentSizedMatrix: { 12833 const auto *MX = cast<DependentSizedMatrixType>(X), 12834 *MY = cast<DependentSizedMatrixType>(Y); 12835 assert(Ctx.hasSameExpr(MX->getRowExpr(), MY->getRowExpr())); 12836 assert(Ctx.hasSameExpr(MX->getColumnExpr(), MY->getColumnExpr())); 12837 return Ctx.getDependentSizedMatrixType( 12838 getCommonElementType(Ctx, MX, MY), MX->getRowExpr(), 12839 MX->getColumnExpr(), getCommonAttrLoc(MX, MY)); 12840 } 12841 case Type::Vector: { 12842 const auto *VX = cast<VectorType>(X), *VY =
cast<VectorType>(Y); 12843 assert(VX->getNumElements() == VY->getNumElements()); 12844 assert(VX->getVectorKind() == VY->getVectorKind()); 12845 return Ctx.getVectorType(getCommonElementType(Ctx, VX, VY), 12846 VX->getNumElements(), VX->getVectorKind()); 12847 } 12848 case Type::ExtVector: { 12849 const auto *VX = cast<ExtVectorType>(X), *VY = cast<ExtVectorType>(Y); 12850 assert(VX->getNumElements() == VY->getNumElements()); 12851 return Ctx.getExtVectorType(getCommonElementType(Ctx, VX, VY), 12852 VX->getNumElements()); 12853 } 12854 case Type::DependentSizedExtVector: { 12855 const auto *VX = cast<DependentSizedExtVectorType>(X), 12856 *VY = cast<DependentSizedExtVectorType>(Y); 12857 return Ctx.getDependentSizedExtVectorType(getCommonElementType(Ctx, VX, VY), 12858 getCommonSizeExpr(Ctx, VX, VY), 12859 getCommonAttrLoc(VX, VY)); 12860 } 12861 case Type::DependentVector: { 12862 const auto *VX = cast<DependentVectorType>(X), 12863 *VY = cast<DependentVectorType>(Y); 12864 assert(VX->getVectorKind() == VY->getVectorKind()); 12865 return Ctx.getDependentVectorType( 12866 getCommonElementType(Ctx, VX, VY), getCommonSizeExpr(Ctx, VX, VY), 12867 getCommonAttrLoc(VX, VY), VX->getVectorKind()); 12868 } 12869 case Type::InjectedClassName: { 12870 const auto *IX = cast<InjectedClassNameType>(X), 12871 *IY = cast<InjectedClassNameType>(Y); 12872 return Ctx.getInjectedClassNameType( 12873 getCommonDeclChecked(IX->getDecl(), IY->getDecl()), 12874 Ctx.getCommonSugaredType(IX->getInjectedSpecializationType(), 12875 IY->getInjectedSpecializationType())); 12876 } 12877 case Type::TemplateSpecialization: { 12878 const auto *TX = cast<TemplateSpecializationType>(X), 12879 *TY = cast<TemplateSpecializationType>(Y); 12880 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12881 TY->template_arguments()); 12882 return Ctx.getTemplateSpecializationType( 12883 ::getCommonTemplateNameChecked(Ctx, TX->getTemplateName(), 12884 TY->getTemplateName()), 12885 As, X->getCanonicalTypeInternal()); 12886 } 12887 case Type::Decltype: { 12888 const auto *DX = cast<DecltypeType>(X); 12889 [[maybe_unused]] const auto *DY = cast<DecltypeType>(Y); 12890 assert(DX->isDependentType()); 12891 assert(DY->isDependentType()); 12892 assert(Ctx.hasSameExpr(DX->getUnderlyingExpr(), DY->getUnderlyingExpr())); 12893 // As Decltype is not uniqued, building a common type would be wasteful. 
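    // Returning X unchanged is reasonable here: the asserts above guarantee
    // that X and Y are dependent decltypes of the same underlying expression,
    // so X is as good a representative as any common node we could build.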
12894 return QualType(DX, 0); 12895 } 12896 case Type::DependentName: { 12897 const auto *NX = cast<DependentNameType>(X), 12898 *NY = cast<DependentNameType>(Y); 12899 assert(NX->getIdentifier() == NY->getIdentifier()); 12900 return Ctx.getDependentNameType( 12901 getCommonTypeKeyword(NX, NY), getCommonNNS(Ctx, NX, NY), 12902 NX->getIdentifier(), NX->getCanonicalTypeInternal()); 12903 } 12904 case Type::DependentTemplateSpecialization: { 12905 const auto *TX = cast<DependentTemplateSpecializationType>(X), 12906 *TY = cast<DependentTemplateSpecializationType>(Y); 12907 assert(TX->getIdentifier() == TY->getIdentifier()); 12908 auto As = getCommonTemplateArguments(Ctx, TX->template_arguments(), 12909 TY->template_arguments()); 12910 return Ctx.getDependentTemplateSpecializationType( 12911 getCommonTypeKeyword(TX, TY), getCommonNNS(Ctx, TX, TY), 12912 TX->getIdentifier(), As); 12913 } 12914 case Type::UnaryTransform: { 12915 const auto *TX = cast<UnaryTransformType>(X), 12916 *TY = cast<UnaryTransformType>(Y); 12917 assert(TX->getUTTKind() == TY->getUTTKind()); 12918 return Ctx.getUnaryTransformType( 12919 Ctx.getCommonSugaredType(TX->getBaseType(), TY->getBaseType()), 12920 Ctx.getCommonSugaredType(TX->getUnderlyingType(), 12921 TY->getUnderlyingType()), 12922 TX->getUTTKind()); 12923 } 12924 case Type::PackExpansion: { 12925 const auto *PX = cast<PackExpansionType>(X), 12926 *PY = cast<PackExpansionType>(Y); 12927 assert(PX->getNumExpansions() == PY->getNumExpansions()); 12928 return Ctx.getPackExpansionType( 12929 Ctx.getCommonSugaredType(PX->getPattern(), PY->getPattern()), 12930 PX->getNumExpansions(), false); 12931 } 12932 case Type::Pipe: { 12933 const auto *PX = cast<PipeType>(X), *PY = cast<PipeType>(Y); 12934 assert(PX->isReadOnly() == PY->isReadOnly()); 12935 auto MP = PX->isReadOnly() ? 
&ASTContext::getReadPipeType 12936 : &ASTContext::getWritePipeType; 12937 return (Ctx.*MP)(getCommonElementType(Ctx, PX, PY)); 12938 } 12939 case Type::TemplateTypeParm: { 12940 const auto *TX = cast<TemplateTypeParmType>(X), 12941 *TY = cast<TemplateTypeParmType>(Y); 12942 assert(TX->getDepth() == TY->getDepth()); 12943 assert(TX->getIndex() == TY->getIndex()); 12944 assert(TX->isParameterPack() == TY->isParameterPack()); 12945 return Ctx.getTemplateTypeParmType( 12946 TX->getDepth(), TX->getIndex(), TX->isParameterPack(), 12947 getCommonDecl(TX->getDecl(), TY->getDecl())); 12948 } 12949 } 12950 llvm_unreachable("Unknown Type Class"); 12951 } 12952 12953 static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, 12954 const Type *Y, 12955 SplitQualType Underlying) { 12956 Type::TypeClass TC = X->getTypeClass(); 12957 if (TC != Y->getTypeClass()) 12958 return QualType(); 12959 switch (TC) { 12960 #define UNEXPECTED_TYPE(Class, Kind) \ 12961 case Type::Class: \ 12962 llvm_unreachable("Unexpected " Kind ": " #Class); 12963 #define TYPE(Class, Base) 12964 #define DEPENDENT_TYPE(Class, Base) UNEXPECTED_TYPE(Class, "dependent") 12965 #include "clang/AST/TypeNodes.inc" 12966 12967 #define CANONICAL_TYPE(Class) UNEXPECTED_TYPE(Class, "canonical") 12968 CANONICAL_TYPE(Atomic) 12969 CANONICAL_TYPE(BitInt) 12970 CANONICAL_TYPE(BlockPointer) 12971 CANONICAL_TYPE(Builtin) 12972 CANONICAL_TYPE(Complex) 12973 CANONICAL_TYPE(ConstantArray) 12974 CANONICAL_TYPE(ConstantMatrix) 12975 CANONICAL_TYPE(Enum) 12976 CANONICAL_TYPE(ExtVector) 12977 CANONICAL_TYPE(FunctionNoProto) 12978 CANONICAL_TYPE(FunctionProto) 12979 CANONICAL_TYPE(IncompleteArray) 12980 CANONICAL_TYPE(LValueReference) 12981 CANONICAL_TYPE(MemberPointer) 12982 CANONICAL_TYPE(ObjCInterface) 12983 CANONICAL_TYPE(ObjCObject) 12984 CANONICAL_TYPE(ObjCObjectPointer) 12985 CANONICAL_TYPE(Pipe) 12986 CANONICAL_TYPE(Pointer) 12987 CANONICAL_TYPE(Record) 12988 CANONICAL_TYPE(RValueReference) 12989 CANONICAL_TYPE(VariableArray) 12990 CANONICAL_TYPE(Vector) 12991 #undef CANONICAL_TYPE 12992 12993 #undef UNEXPECTED_TYPE 12994 12995 case Type::Adjusted: { 12996 const auto *AX = cast<AdjustedType>(X), *AY = cast<AdjustedType>(Y); 12997 QualType OX = AX->getOriginalType(), OY = AY->getOriginalType(); 12998 if (!Ctx.hasSameType(OX, OY)) 12999 return QualType(); 13000 // FIXME: It's inefficient to have to unify the original types. 13001 return Ctx.getAdjustedType(Ctx.getCommonSugaredType(OX, OY), 13002 Ctx.getQualifiedType(Underlying)); 13003 } 13004 case Type::Decayed: { 13005 const auto *DX = cast<DecayedType>(X), *DY = cast<DecayedType>(Y); 13006 QualType OX = DX->getOriginalType(), OY = DY->getOriginalType(); 13007 if (!Ctx.hasSameType(OX, OY)) 13008 return QualType(); 13009 // FIXME: It's inefficient to have to unify the original types. 13010 return Ctx.getDecayedType(Ctx.getCommonSugaredType(OX, OY), 13011 Ctx.getQualifiedType(Underlying)); 13012 } 13013 case Type::Attributed: { 13014 const auto *AX = cast<AttributedType>(X), *AY = cast<AttributedType>(Y); 13015 AttributedType::Kind Kind = AX->getAttrKind(); 13016 if (Kind != AY->getAttrKind()) 13017 return QualType(); 13018 QualType MX = AX->getModifiedType(), MY = AY->getModifiedType(); 13019 if (!Ctx.hasSameType(MX, MY)) 13020 return QualType(); 13021 // FIXME: It's inefficient to have to unify the modified types. 
13022 return Ctx.getAttributedType(Kind, Ctx.getCommonSugaredType(MX, MY), 13023 Ctx.getQualifiedType(Underlying)); 13024 } 13025 case Type::BTFTagAttributed: { 13026 const auto *BX = cast<BTFTagAttributedType>(X); 13027 const BTFTypeTagAttr *AX = BX->getAttr(); 13028 // The attribute is not uniqued, so just compare the tag. 13029 if (AX->getBTFTypeTag() != 13030 cast<BTFTagAttributedType>(Y)->getAttr()->getBTFTypeTag()) 13031 return QualType(); 13032 return Ctx.getBTFTagAttributedType(AX, Ctx.getQualifiedType(Underlying)); 13033 } 13034 case Type::Auto: { 13035 const auto *AX = cast<AutoType>(X), *AY = cast<AutoType>(Y); 13036 13037 AutoTypeKeyword KW = AX->getKeyword(); 13038 if (KW != AY->getKeyword()) 13039 return QualType(); 13040 13041 ConceptDecl *CD = ::getCommonDecl(AX->getTypeConstraintConcept(), 13042 AY->getTypeConstraintConcept()); 13043 SmallVector<TemplateArgument, 8> As; 13044 if (CD && 13045 getCommonTemplateArguments(Ctx, As, AX->getTypeConstraintArguments(), 13046 AY->getTypeConstraintArguments())) { 13047 CD = nullptr; // The arguments differ, so make it unconstrained. 13048 As.clear(); 13049 } 13050 13051 // Both auto types can't be dependent, otherwise they wouldn't have been 13052 // sugar. This implies they can't contain unexpanded packs either. 13053 return Ctx.getAutoType(Ctx.getQualifiedType(Underlying), AX->getKeyword(), 13054 /*IsDependent=*/false, /*IsPack=*/false, CD, As); 13055 } 13056 case Type::Decltype: 13057 return QualType(); 13058 case Type::DeducedTemplateSpecialization: 13059 // FIXME: Try to merge these. 13060 return QualType(); 13061 13062 case Type::Elaborated: { 13063 const auto *EX = cast<ElaboratedType>(X), *EY = cast<ElaboratedType>(Y); 13064 return Ctx.getElaboratedType( 13065 ::getCommonTypeKeyword(EX, EY), ::getCommonNNS(Ctx, EX, EY), 13066 Ctx.getQualifiedType(Underlying), 13067 ::getCommonDecl(EX->getOwnedTagDecl(), EY->getOwnedTagDecl())); 13068 } 13069 case Type::MacroQualified: { 13070 const auto *MX = cast<MacroQualifiedType>(X), 13071 *MY = cast<MacroQualifiedType>(Y); 13072 const IdentifierInfo *IX = MX->getMacroIdentifier(); 13073 if (IX != MY->getMacroIdentifier()) 13074 return QualType(); 13075 return Ctx.getMacroQualifiedType(Ctx.getQualifiedType(Underlying), IX); 13076 } 13077 case Type::SubstTemplateTypeParm: { 13078 const auto *SX = cast<SubstTemplateTypeParmType>(X), 13079 *SY = cast<SubstTemplateTypeParmType>(Y); 13080 Decl *CD = 13081 ::getCommonDecl(SX->getAssociatedDecl(), SY->getAssociatedDecl()); 13082 if (!CD) 13083 return QualType(); 13084 unsigned Index = SX->getIndex(); 13085 if (Index != SY->getIndex()) 13086 return QualType(); 13087 auto PackIndex = SX->getPackIndex(); 13088 if (PackIndex != SY->getPackIndex()) 13089 return QualType(); 13090 return Ctx.getSubstTemplateTypeParmType(Ctx.getQualifiedType(Underlying), 13091 CD, Index, PackIndex); 13092 } 13093 case Type::ObjCTypeParam: 13094 // FIXME: Try to merge these. 
13095 return QualType(); 13096 case Type::Paren: 13097 return Ctx.getParenType(Ctx.getQualifiedType(Underlying)); 13098 13099 case Type::TemplateSpecialization: { 13100 const auto *TX = cast<TemplateSpecializationType>(X), 13101 *TY = cast<TemplateSpecializationType>(Y); 13102 TemplateName CTN = ::getCommonTemplateName(Ctx, TX->getTemplateName(), 13103 TY->getTemplateName()); 13104 if (!CTN.getAsVoidPointer()) 13105 return QualType(); 13106 SmallVector<TemplateArgument, 8> Args; 13107 if (getCommonTemplateArguments(Ctx, Args, TX->template_arguments(), 13108 TY->template_arguments())) 13109 return QualType(); 13110 return Ctx.getTemplateSpecializationType(CTN, Args, 13111 Ctx.getQualifiedType(Underlying)); 13112 } 13113 case Type::Typedef: { 13114 const auto *TX = cast<TypedefType>(X), *TY = cast<TypedefType>(Y); 13115 const TypedefNameDecl *CD = ::getCommonDecl(TX->getDecl(), TY->getDecl()); 13116 if (!CD) 13117 return QualType(); 13118 return Ctx.getTypedefType(CD, Ctx.getQualifiedType(Underlying)); 13119 } 13120 case Type::TypeOf: { 13121 // The common sugar between two typeof expressions, where one is 13122 // potentially a typeof_unqual and the other is not, we unify to the 13123 // qualified type as that retains the most information along with the type. 13124 // We only return a typeof_unqual type when both types are unqual types. 13125 TypeOfKind Kind = TypeOfKind::Qualified; 13126 if (cast<TypeOfType>(X)->getKind() == cast<TypeOfType>(Y)->getKind() && 13127 cast<TypeOfType>(X)->getKind() == TypeOfKind::Unqualified) 13128 Kind = TypeOfKind::Unqualified; 13129 return Ctx.getTypeOfType(Ctx.getQualifiedType(Underlying), Kind); 13130 } 13131 case Type::TypeOfExpr: 13132 return QualType(); 13133 13134 case Type::UnaryTransform: { 13135 const auto *UX = cast<UnaryTransformType>(X), 13136 *UY = cast<UnaryTransformType>(Y); 13137 UnaryTransformType::UTTKind KX = UX->getUTTKind(); 13138 if (KX != UY->getUTTKind()) 13139 return QualType(); 13140 QualType BX = UX->getBaseType(), BY = UY->getBaseType(); 13141 if (!Ctx.hasSameType(BX, BY)) 13142 return QualType(); 13143 // FIXME: It's inefficient to have to unify the base types. 13144 return Ctx.getUnaryTransformType(Ctx.getCommonSugaredType(BX, BY), 13145 Ctx.getQualifiedType(Underlying), KX); 13146 } 13147 case Type::Using: { 13148 const auto *UX = cast<UsingType>(X), *UY = cast<UsingType>(Y); 13149 const UsingShadowDecl *CD = 13150 ::getCommonDecl(UX->getFoundDecl(), UY->getFoundDecl()); 13151 if (!CD) 13152 return QualType(); 13153 return Ctx.getUsingType(CD, Ctx.getQualifiedType(Underlying)); 13154 } 13155 } 13156 llvm_unreachable("Unhandled Type Class"); 13157 } 13158 13159 static auto unwrapSugar(SplitQualType &T, Qualifiers &QTotal) { 13160 SmallVector<SplitQualType, 8> R; 13161 while (true) { 13162 QTotal.addConsistentQualifiers(T.Quals); 13163 QualType NT = T.Ty->getLocallyUnqualifiedSingleStepDesugaredType(); 13164 if (NT == QualType(T.Ty, 0)) 13165 break; 13166 R.push_back(T); 13167 T = NT.split(); 13168 } 13169 return R; 13170 } 13171 13172 QualType ASTContext::getCommonSugaredType(QualType X, QualType Y, 13173 bool Unqualified) { 13174 assert(Unqualified ? 
hasSameUnqualifiedType(X, Y) : hasSameType(X, Y)); 13175 if (X == Y) 13176 return X; 13177 if (!Unqualified) { 13178 if (X.isCanonical()) 13179 return X; 13180 if (Y.isCanonical()) 13181 return Y; 13182 } 13183 13184 SplitQualType SX = X.split(), SY = Y.split(); 13185 Qualifiers QX, QY; 13186 // Desugar SX and SY, setting the sugar and qualifiers aside into Xs and Ys, 13187 // until we reach their underlying "canonical nodes". Note these are not 13188 // necessarily canonical types, as they may still have sugared properties. 13189 // QX and QY will store the sum of all qualifiers in Xs and Ys respectively. 13190 auto Xs = ::unwrapSugar(SX, QX), Ys = ::unwrapSugar(SY, QY); 13191 if (SX.Ty != SY.Ty) { 13192 // The canonical nodes differ. Build a common canonical node out of the two, 13193 // unifying their sugar. This may recurse back here. 13194 SX.Ty = 13195 ::getCommonNonSugarTypeNode(*this, SX.Ty, QX, SY.Ty, QY).getTypePtr(); 13196 } else { 13197 // The canonical nodes were identical: We may have desugared too much. 13198 // Add any common sugar back in. 13199 while (!Xs.empty() && !Ys.empty() && Xs.back().Ty == Ys.back().Ty) { 13200 QX -= SX.Quals; 13201 QY -= SY.Quals; 13202 SX = Xs.pop_back_val(); 13203 SY = Ys.pop_back_val(); 13204 } 13205 } 13206 if (Unqualified) 13207 QX = Qualifiers::removeCommonQualifiers(QX, QY); 13208 else 13209 assert(QX == QY); 13210 13211 // Even though the remaining sugar nodes in Xs and Ys differ, some may be 13212 // related. Walk up these nodes, unifying them and adding the result. 13213 while (!Xs.empty() && !Ys.empty()) { 13214 auto Underlying = SplitQualType( 13215 SX.Ty, Qualifiers::removeCommonQualifiers(SX.Quals, SY.Quals)); 13216 SX = Xs.pop_back_val(); 13217 SY = Ys.pop_back_val(); 13218 SX.Ty = ::getCommonSugarTypeNode(*this, SX.Ty, SY.Ty, Underlying) 13219 .getTypePtrOrNull(); 13220 // Stop at the first pair which is unrelated. 13221 if (!SX.Ty) { 13222 SX.Ty = Underlying.Ty; 13223 break; 13224 } 13225 QX -= Underlying.Quals; 13226 }; 13227 13228 // Add back the missing accumulated qualifiers, which were stripped off 13229 // with the sugar nodes we could not unify. 13230 QualType R = getQualifiedType(SX.Ty, QX); 13231 assert(Unqualified ? 
hasSameUnqualifiedType(R, X) : hasSameType(R, X)); 13232 return R; 13233 } 13234 13235 QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const { 13236 assert(Ty->isFixedPointType()); 13237 13238 if (Ty->isSaturatedFixedPointType()) return Ty; 13239 13240 switch (Ty->castAs<BuiltinType>()->getKind()) { 13241 default: 13242 llvm_unreachable("Not a fixed point type!"); 13243 case BuiltinType::ShortAccum: 13244 return SatShortAccumTy; 13245 case BuiltinType::Accum: 13246 return SatAccumTy; 13247 case BuiltinType::LongAccum: 13248 return SatLongAccumTy; 13249 case BuiltinType::UShortAccum: 13250 return SatUnsignedShortAccumTy; 13251 case BuiltinType::UAccum: 13252 return SatUnsignedAccumTy; 13253 case BuiltinType::ULongAccum: 13254 return SatUnsignedLongAccumTy; 13255 case BuiltinType::ShortFract: 13256 return SatShortFractTy; 13257 case BuiltinType::Fract: 13258 return SatFractTy; 13259 case BuiltinType::LongFract: 13260 return SatLongFractTy; 13261 case BuiltinType::UShortFract: 13262 return SatUnsignedShortFractTy; 13263 case BuiltinType::UFract: 13264 return SatUnsignedFractTy; 13265 case BuiltinType::ULongFract: 13266 return SatUnsignedLongFractTy; 13267 } 13268 } 13269 13270 LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const { 13271 if (LangOpts.OpenCL) 13272 return getTargetInfo().getOpenCLBuiltinAddressSpace(AS); 13273 13274 if (LangOpts.CUDA) 13275 return getTargetInfo().getCUDABuiltinAddressSpace(AS); 13276 13277 return getLangASFromTargetAS(AS); 13278 } 13279 13280 // Explicitly instantiate this in case a Redeclarable<T> is used from a TU that 13281 // doesn't include ASTContext.h 13282 template 13283 clang::LazyGenerationalUpdatePtr< 13284 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType 13285 clang::LazyGenerationalUpdatePtr< 13286 const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue( 13287 const clang::ASTContext &Ctx, Decl *Value); 13288 13289 unsigned char ASTContext::getFixedPointScale(QualType Ty) const { 13290 assert(Ty->isFixedPointType()); 13291 13292 const TargetInfo &Target = getTargetInfo(); 13293 switch (Ty->castAs<BuiltinType>()->getKind()) { 13294 default: 13295 llvm_unreachable("Not a fixed point type!"); 13296 case BuiltinType::ShortAccum: 13297 case BuiltinType::SatShortAccum: 13298 return Target.getShortAccumScale(); 13299 case BuiltinType::Accum: 13300 case BuiltinType::SatAccum: 13301 return Target.getAccumScale(); 13302 case BuiltinType::LongAccum: 13303 case BuiltinType::SatLongAccum: 13304 return Target.getLongAccumScale(); 13305 case BuiltinType::UShortAccum: 13306 case BuiltinType::SatUShortAccum: 13307 return Target.getUnsignedShortAccumScale(); 13308 case BuiltinType::UAccum: 13309 case BuiltinType::SatUAccum: 13310 return Target.getUnsignedAccumScale(); 13311 case BuiltinType::ULongAccum: 13312 case BuiltinType::SatULongAccum: 13313 return Target.getUnsignedLongAccumScale(); 13314 case BuiltinType::ShortFract: 13315 case BuiltinType::SatShortFract: 13316 return Target.getShortFractScale(); 13317 case BuiltinType::Fract: 13318 case BuiltinType::SatFract: 13319 return Target.getFractScale(); 13320 case BuiltinType::LongFract: 13321 case BuiltinType::SatLongFract: 13322 return Target.getLongFractScale(); 13323 case BuiltinType::UShortFract: 13324 case BuiltinType::SatUShortFract: 13325 return Target.getUnsignedShortFractScale(); 13326 case BuiltinType::UFract: 13327 case BuiltinType::SatUFract: 13328 return Target.getUnsignedFractScale(); 13329 case BuiltinType::ULongFract: 13330 
case BuiltinType::SatULongFract: 13331 return Target.getUnsignedLongFractScale(); 13332 } 13333 } 13334 13335 unsigned char ASTContext::getFixedPointIBits(QualType Ty) const { 13336 assert(Ty->isFixedPointType()); 13337 13338 const TargetInfo &Target = getTargetInfo(); 13339 switch (Ty->castAs<BuiltinType>()->getKind()) { 13340 default: 13341 llvm_unreachable("Not a fixed point type!"); 13342 case BuiltinType::ShortAccum: 13343 case BuiltinType::SatShortAccum: 13344 return Target.getShortAccumIBits(); 13345 case BuiltinType::Accum: 13346 case BuiltinType::SatAccum: 13347 return Target.getAccumIBits(); 13348 case BuiltinType::LongAccum: 13349 case BuiltinType::SatLongAccum: 13350 return Target.getLongAccumIBits(); 13351 case BuiltinType::UShortAccum: 13352 case BuiltinType::SatUShortAccum: 13353 return Target.getUnsignedShortAccumIBits(); 13354 case BuiltinType::UAccum: 13355 case BuiltinType::SatUAccum: 13356 return Target.getUnsignedAccumIBits(); 13357 case BuiltinType::ULongAccum: 13358 case BuiltinType::SatULongAccum: 13359 return Target.getUnsignedLongAccumIBits(); 13360 case BuiltinType::ShortFract: 13361 case BuiltinType::SatShortFract: 13362 case BuiltinType::Fract: 13363 case BuiltinType::SatFract: 13364 case BuiltinType::LongFract: 13365 case BuiltinType::SatLongFract: 13366 case BuiltinType::UShortFract: 13367 case BuiltinType::SatUShortFract: 13368 case BuiltinType::UFract: 13369 case BuiltinType::SatUFract: 13370 case BuiltinType::ULongFract: 13371 case BuiltinType::SatULongFract: 13372 return 0; 13373 } 13374 } 13375 13376 llvm::FixedPointSemantics 13377 ASTContext::getFixedPointSemantics(QualType Ty) const { 13378 assert((Ty->isFixedPointType() || Ty->isIntegerType()) && 13379 "Can only get the fixed point semantics for a " 13380 "fixed point or integer type."); 13381 if (Ty->isIntegerType()) 13382 return llvm::FixedPointSemantics::GetIntegerSemantics( 13383 getIntWidth(Ty), Ty->isSignedIntegerType()); 13384 13385 bool isSigned = Ty->isSignedFixedPointType(); 13386 return llvm::FixedPointSemantics( 13387 static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned, 13388 Ty->isSaturatedFixedPointType(), 13389 !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding()); 13390 } 13391 13392 llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const { 13393 assert(Ty->isFixedPointType()); 13394 return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty)); 13395 } 13396 13397 llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const { 13398 assert(Ty->isFixedPointType()); 13399 return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty)); 13400 } 13401 13402 QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const { 13403 assert(Ty->isUnsignedFixedPointType() && 13404 "Expected unsigned fixed point type"); 13405 13406 switch (Ty->castAs<BuiltinType>()->getKind()) { 13407 case BuiltinType::UShortAccum: 13408 return ShortAccumTy; 13409 case BuiltinType::UAccum: 13410 return AccumTy; 13411 case BuiltinType::ULongAccum: 13412 return LongAccumTy; 13413 case BuiltinType::SatUShortAccum: 13414 return SatShortAccumTy; 13415 case BuiltinType::SatUAccum: 13416 return SatAccumTy; 13417 case BuiltinType::SatULongAccum: 13418 return SatLongAccumTy; 13419 case BuiltinType::UShortFract: 13420 return ShortFractTy; 13421 case BuiltinType::UFract: 13422 return FractTy; 13423 case BuiltinType::ULongFract: 13424 return LongFractTy; 13425 case BuiltinType::SatUShortFract: 13426 return SatShortFractTy; 13427 case BuiltinType::SatUFract: 13428 
return SatFractTy; 13429 case BuiltinType::SatULongFract: 13430 return SatLongFractTy; 13431 default: 13432 llvm_unreachable("Unexpected unsigned fixed point type"); 13433 } 13434 } 13435 13436 std::vector<std::string> ASTContext::filterFunctionTargetVersionAttrs( 13437 const TargetVersionAttr *TV) const { 13438 assert(TV != nullptr); 13439 llvm::SmallVector<StringRef, 8> Feats; 13440 std::vector<std::string> ResFeats; 13441 TV->getFeatures(Feats); 13442 for (auto &Feature : Feats) 13443 if (Target->validateCpuSupports(Feature.str())) 13444 // Use '?' to mark features that came from TargetVersion. 13445 ResFeats.push_back("?" + Feature.str()); 13446 return ResFeats; 13447 } 13448 13449 ParsedTargetAttr 13450 ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const { 13451 assert(TD != nullptr); 13452 ParsedTargetAttr ParsedAttr = Target->parseTargetAttr(TD->getFeaturesStr()); 13453 13454 llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) { 13455 return !Target->isValidFeatureName(StringRef{Feat}.substr(1)); 13456 }); 13457 return ParsedAttr; 13458 } 13459 13460 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13461 const FunctionDecl *FD) const { 13462 if (FD) 13463 getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD)); 13464 else 13465 Target->initFeatureMap(FeatureMap, getDiagnostics(), 13466 Target->getTargetOpts().CPU, 13467 Target->getTargetOpts().Features); 13468 } 13469 13470 // Fills in the supplied string map with the set of target features for the 13471 // passed in function. 13472 void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap, 13473 GlobalDecl GD) const { 13474 StringRef TargetCPU = Target->getTargetOpts().CPU; 13475 const FunctionDecl *FD = GD.getDecl()->getAsFunction(); 13476 if (const auto *TD = FD->getAttr<TargetAttr>()) { 13477 ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD); 13478 13479 // Make a copy of the features as passed on the command line into the 13480 // beginning of the additional features from the function to override. 13481 ParsedAttr.Features.insert( 13482 ParsedAttr.Features.begin(), 13483 Target->getTargetOpts().FeaturesAsWritten.begin(), 13484 Target->getTargetOpts().FeaturesAsWritten.end()); 13485 13486 if (ParsedAttr.CPU != "" && Target->isValidCPUName(ParsedAttr.CPU)) 13487 TargetCPU = ParsedAttr.CPU; 13488 13489 // Now populate the feature map, first with the TargetCPU which is either 13490 // the default or a new one from the target attribute string. Then we'll use 13491 // the passed in features (FeaturesAsWritten) along with the new ones from 13492 // the attribute. 
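    // Illustrative example (assuming the usual initFeatureMap behavior where
    // later entries take precedence): with '-mno-avx2' on the command line and
    // __attribute__((target("avx2"))) on the function, the command-line
    // features come first and the attribute's '+avx2' after them, so AVX2 ends
    // up enabled in this function's feature map.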
13493 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, 13494 ParsedAttr.Features); 13495 } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) { 13496 llvm::SmallVector<StringRef, 32> FeaturesTmp; 13497 Target->getCPUSpecificCPUDispatchFeatures( 13498 SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp); 13499 std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end()); 13500 Features.insert(Features.begin(), 13501 Target->getTargetOpts().FeaturesAsWritten.begin(), 13502 Target->getTargetOpts().FeaturesAsWritten.end()); 13503 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); 13504 } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) { 13505 std::vector<std::string> Features; 13506 StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex()); 13507 if (Target->getTriple().isAArch64()) { 13508 // TargetClones for AArch64 13509 if (VersionStr != "default") { 13510 SmallVector<StringRef, 1> VersionFeatures; 13511 VersionStr.split(VersionFeatures, "+"); 13512 for (auto &VFeature : VersionFeatures) { 13513 VFeature = VFeature.trim(); 13514 // Use '?' to mark features that came from AArch64 TargetClones. 13515 Features.push_back((StringRef{"?"} + VFeature).str()); 13516 } 13517 } 13518 Features.insert(Features.begin(), 13519 Target->getTargetOpts().FeaturesAsWritten.begin(), 13520 Target->getTargetOpts().FeaturesAsWritten.end()); 13521 } else { 13522 if (VersionStr.starts_with("arch=")) 13523 TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1); 13524 else if (VersionStr != "default") 13525 Features.push_back((StringRef{"+"} + VersionStr).str()); 13526 } 13527 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features); 13528 } else if (const auto *TV = FD->getAttr<TargetVersionAttr>()) { 13529 std::vector<std::string> Feats = filterFunctionTargetVersionAttrs(TV); 13530 Feats.insert(Feats.begin(), 13531 Target->getTargetOpts().FeaturesAsWritten.begin(), 13532 Target->getTargetOpts().FeaturesAsWritten.end()); 13533 Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Feats); 13534 } else { 13535 FeatureMap = Target->getTargetOpts().FeatureMap; 13536 } 13537 } 13538 13539 OMPTraitInfo &ASTContext::getNewOMPTraitInfo() { 13540 OMPTraitInfoVector.emplace_back(new OMPTraitInfo()); 13541 return *OMPTraitInfoVector.back(); 13542 } 13543 13544 const StreamingDiagnostic &clang:: 13545 operator<<(const StreamingDiagnostic &DB, 13546 const ASTContext::SectionInfo &Section) { 13547 if (Section.Decl) 13548 return DB << Section.Decl; 13549 return DB << "a prior #pragma section"; 13550 } 13551 13552 bool ASTContext::mayExternalize(const Decl *D) const { 13553 bool IsInternalVar = 13554 isa<VarDecl>(D) && 13555 basicGVALinkageForVariable(*this, cast<VarDecl>(D)) == GVA_Internal; 13556 bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() && 13557 !D->getAttr<CUDADeviceAttr>()->isImplicit()) || 13558 (D->hasAttr<CUDAConstantAttr>() && 13559 !D->getAttr<CUDAConstantAttr>()->isImplicit()); 13560 // CUDA/HIP: managed variables need to be externalized since they are 13561 // declarations in IR and therefore cannot have internal linkage. Kernels in 13562 // an anonymous namespace need to be externalized to avoid duplicate symbols.
13563 return (IsInternalVar && 13564 (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar)) || 13565 (D->hasAttr<CUDAGlobalAttr>() && 13566 basicGVALinkageForFunction(*this, cast<FunctionDecl>(D)) == 13567 GVA_Internal); 13568 } 13569 13570 bool ASTContext::shouldExternalize(const Decl *D) const { 13571 return mayExternalize(D) && 13572 (D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() || 13573 CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D))); 13574 } 13575 13576 StringRef ASTContext::getCUIDHash() const { 13577 if (!CUIDHash.empty()) 13578 return CUIDHash; 13579 if (LangOpts.CUID.empty()) 13580 return StringRef(); 13581 CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true); 13582 return CUIDHash; 13583 } 13584