//===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ASTContext interface.
//
//===----------------------------------------------------------------------===//

#include "clang/AST/ASTContext.h"
#include "CXXABI.h"
#include "Interp/Context.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/AttrIterator.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Comment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RawCommentList.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>

using namespace clang;

enum FloatingRank {
  BFloat16Rank,
  Float16Rank,
  HalfRank,
  FloatRank,
  DoubleRank,
  LongDoubleRank,
  Float128Rank,
  Ibm128Rank
};

/// \returns location that is relevant when searching for Doc comments related
/// to \p D.
static SourceLocation getDeclLocForCommentSearch(const Decl *D,
                                                 SourceManager &SourceMgr) {
  assert(D);

  // Users cannot attach documentation to implicit declarations.
  if (D->isImplicit())
    return {};

  // Users cannot attach documentation to implicit instantiations.
  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (VD->isStaticDataMember() &&
        VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) {
    if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }

  if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
    TemplateSpecializationKind TSK = CTSD->getSpecializationKind();
    if (TSK == TSK_ImplicitInstantiation ||
        TSK == TSK_Undeclared)
      return {};
  }

  if (const auto *ED = dyn_cast<EnumDecl>(D)) {
    if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return {};
  }
  if (const auto *TD = dyn_cast<TagDecl>(D)) {
    // When a tag declaration (but not definition!) is part of the
    // decl-specifier-seq of some other declaration, it doesn't get a comment.
    if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition())
      return {};
  }
  // TODO: handle comments for function parameters properly.
  if (isa<ParmVarDecl>(D))
    return {};

  // TODO: we could look up template parameter documentation in the template
  // documentation.
  if (isa<TemplateTypeParmDecl>(D) ||
      isa<NonTypeTemplateParmDecl>(D) ||
      isa<TemplateTemplateParmDecl>(D))
    return {};

  // Find the declaration location.
  // For Objective-C declarations we generally don't expect to have multiple
  // declarators, thus use the declaration starting location as the
  // "declaration location".
  // For all other declarations multiple declarators are used quite frequently,
  // so we use the location of the identifier as the "declaration location".
  if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) ||
      isa<ObjCPropertyDecl>(D) ||
      isa<RedeclarableTemplateDecl>(D) ||
      isa<ClassTemplateSpecializationDecl>(D) ||
      // Allow association with Y across {} in `typedef struct X {} Y`.
      isa<TypedefDecl>(D))
    return D->getBeginLoc();

  const SourceLocation DeclLoc = D->getLocation();
  if (DeclLoc.isMacroID()) {
    if (isa<TypedefDecl>(D)) {
      // If the location of the typedef name is in a macro, that is because the
      // typedef is being declared via a macro. Try using the declaration's
      // starting location as the "declaration location".
      return D->getBeginLoc();
    }

    if (const auto *TD = dyn_cast<TagDecl>(D)) {
      // If the location of the tag decl is inside a macro, but the spelling of
      // the tag name comes from a macro argument, it looks like a special
      // macro like NS_ENUM is being used to define the tag decl.  In that
      // case, adjust the source location to the expansion loc so that we can
      // attach the comment to the tag decl.
      if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition())
        return SourceMgr.getExpansionLoc(DeclLoc);
    }
  }

  return DeclLoc;
}

RawComment *ASTContext::getRawCommentForDeclNoCacheImpl(
    const Decl *D, const SourceLocation RepresentativeLocForDecl,
    const std::map<unsigned, RawComment *> &CommentsInTheFile) const {
  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (RepresentativeLocForDecl.isInvalid() ||
      !RepresentativeLocForDecl.isFileID())
    return nullptr;

  // If there are no comments anywhere, we won't find anything.
  if (CommentsInTheFile.empty())
    return nullptr;

  // Decompose the location for the declaration and find the beginning of the
  // file buffer.
  const std::pair<FileID, unsigned> DeclLocDecomp =
      SourceMgr.getDecomposedLoc(RepresentativeLocForDecl);

  // Slow path.
  auto OffsetCommentBehindDecl =
      CommentsInTheFile.lower_bound(DeclLocDecomp.second);

  // First check whether we have a trailing comment.
  if (OffsetCommentBehindDecl != CommentsInTheFile.end()) {
    RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second;
    if ((CommentBehindDecl->isDocumentation() ||
         LangOpts.CommentOpts.ParseAllComments) &&
        CommentBehindDecl->isTrailingComment() &&
        (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) ||
         isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) {

      // Check that the Doxygen trailing comment comes after the declaration,
      // starts on the same line, and is in the same file as the declaration.
      if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) ==
          Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first,
                                       OffsetCommentBehindDecl->first)) {
        return CommentBehindDecl;
      }
    }
  }

  // The comment just after the declaration was not a trailing comment.
  // Let's look at the previous comment.
  if (OffsetCommentBehindDecl == CommentsInTheFile.begin())
    return nullptr;

  auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl;
  RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second;

  // Check that we actually have a non-member Doxygen comment.
  if (!(CommentBeforeDecl->isDocumentation() ||
        LangOpts.CommentOpts.ParseAllComments) ||
      CommentBeforeDecl->isTrailingComment())
    return nullptr;

  // Decompose the end of the comment.
  const unsigned CommentEndOffset =
      Comments.getCommentEndOffset(CommentBeforeDecl);

  // Get the corresponding buffer.
  bool Invalid = false;
  const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first,
                                               &Invalid).data();
  if (Invalid)
    return nullptr;

  // Extract the text between the comment and the declaration.
  StringRef Text(Buffer + CommentEndOffset,
                 DeclLocDecomp.second - CommentEndOffset);

  // There should be no other declarations or preprocessor directives between
  // the comment and the declaration.
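  // A ';', '{' or '}' would indicate an intervening declaration or block, '#'
  // a preprocessor directive, and '@' an Objective-C directive.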
  if (Text.find_first_of(";{}#@") != StringRef::npos)
    return nullptr;

  return CommentBeforeDecl;
}

RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const {
  const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

  // If the declaration doesn't map directly to a location in a file, we
  // can't find the comment.
  if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
    return nullptr;

  if (ExternalSource && !CommentsLoaded) {
    ExternalSource->ReadComments();
    CommentsLoaded = true;
  }

  if (Comments.empty())
    return nullptr;

  const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first;
  const auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty())
    return nullptr;

  return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile);
}

void ASTContext::addComment(const RawComment &RC) {
  assert(LangOpts.RetainCommentsFromSystemHeaders ||
         !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin()));
  Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc);
}

/// If we have a 'templated' declaration for a template, adjust 'D' to
/// refer to the actual template.
/// If we have an implicit instantiation, adjust 'D' to refer to the template.
static const Decl &adjustDeclToTemplate(const Decl &D) {
  if (const auto *FD = dyn_cast<FunctionDecl>(&D)) {
    // Is this function declaration part of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
      return *FTD;

    // Nothing to do if the function is not an implicit instantiation.
    if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
      return D;

    // Function is an implicit instantiation of a function template?
    if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate())
      return *FTD;

    // Function is instantiated from a member definition of a class template?
    if (const FunctionDecl *MemberDecl =
            FD->getInstantiatedFromMemberFunction())
      return *MemberDecl;

    return D;
  }
  if (const auto *VD = dyn_cast<VarDecl>(&D)) {
    // Static data member is instantiated from a member definition of a class
    // template?
    if (VD->isStaticDataMember())
      if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember())
        return *MemberDecl;

    return D;
  }
  if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) {
    // Is this class declaration part of a class template?
    if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate())
      return *CTD;

    // Class is an implicit instantiation of a class template or partial
    // specialization?
    if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) {
      if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation)
        return D;
      llvm::PointerUnion<ClassTemplateDecl *,
                         ClassTemplatePartialSpecializationDecl *>
          PU = CTSD->getSpecializedTemplateOrPartial();
      return PU.is<ClassTemplateDecl *>()
                 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>())
                 : *static_cast<const Decl *>(
                       PU.get<ClassTemplatePartialSpecializationDecl *>());
    }

    // Class is instantiated from a member definition of a class template?
    if (const MemberSpecializationInfo *Info =
            CRD->getMemberSpecializationInfo())
      return *Info->getInstantiatedFrom();

    return D;
  }
  if (const auto *ED = dyn_cast<EnumDecl>(&D)) {
    // Enum is instantiated from a member definition of a class template?
    if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum())
      return *MemberDecl;

    return D;
  }
  // FIXME: Adjust alias templates?
  return D;
}

const RawComment *ASTContext::getRawCommentForAnyRedecl(
    const Decl *D,
    const Decl **OriginalDecl) const {
  if (!D) {
    if (OriginalDecl)
      *OriginalDecl = nullptr;
    return nullptr;
  }

  D = &adjustDeclToTemplate(*D);

  // Any comment directly attached to D?
  {
    auto DeclComment = DeclRawComments.find(D);
    if (DeclComment != DeclRawComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = D;
      return DeclComment->second;
    }
  }

  // Any comment attached to any redeclaration of D?
  const Decl *CanonicalD = D->getCanonicalDecl();
  if (!CanonicalD)
    return nullptr;

  {
    auto RedeclComment = RedeclChainComments.find(CanonicalD);
    if (RedeclComment != RedeclChainComments.end()) {
      if (OriginalDecl)
        *OriginalDecl = RedeclComment->second;
      auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second);
      assert(CommentAtRedecl != DeclRawComments.end() &&
             "This decl is supposed to have comment attached.");
      return CommentAtRedecl->second;
    }
  }

  // Any redeclarations of D that we haven't checked for comments yet?
  // We can't use DenseMap::iterator directly since it'd get invalidated.
  auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * {
    auto LookupRes = CommentlessRedeclChains.find(CanonicalD);
    if (LookupRes != CommentlessRedeclChains.end())
      return LookupRes->second;
    return nullptr;
  }();

  for (const auto Redecl : D->redecls()) {
    assert(Redecl);
    // Skip all redeclarations that have been checked previously.
    if (LastCheckedRedecl) {
      if (LastCheckedRedecl == Redecl) {
        LastCheckedRedecl = nullptr;
      }
      continue;
    }
    const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl);
    if (RedeclComment) {
      cacheRawCommentForDecl(*Redecl, *RedeclComment);
      if (OriginalDecl)
        *OriginalDecl = Redecl;
      return RedeclComment;
    }
    CommentlessRedeclChains[CanonicalD] = Redecl;
  }

  if (OriginalDecl)
    *OriginalDecl = nullptr;
  return nullptr;
}

void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD,
                                        const RawComment &Comment) const {
  assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments);
  DeclRawComments.try_emplace(&OriginalD, &Comment);
  const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl();
  RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD);
  CommentlessRedeclChains.erase(CanonicalDecl);
}

static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod,
                                 SmallVectorImpl<const NamedDecl *> &Redeclared) {
  const DeclContext *DC = ObjCMethod->getDeclContext();
  if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) {
    const ObjCInterfaceDecl *ID = IMD->getClassInterface();
    if (!ID)
      return;
    // Add redeclared method here.
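    // The method may also be declared in one of the interface's class
    // extensions; collect those redeclarations so their documentation can be
    // reused.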
    for (const auto *Ext : ID->known_extensions()) {
      if (ObjCMethodDecl *RedeclaredMethod =
              Ext->getMethod(ObjCMethod->getSelector(),
                             ObjCMethod->isInstanceMethod()))
        Redeclared.push_back(RedeclaredMethod);
    }
  }
}

void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls,
                                                 const Preprocessor *PP) {
  if (Comments.empty() || Decls.empty())
    return;

  FileID File;
  for (Decl *D : Decls) {
    SourceLocation Loc = D->getLocation();
    if (Loc.isValid()) {
      // See if there are any new comments that are not attached to a decl.
      // The location doesn't have to be precise - we care only about the file.
      File = SourceMgr.getDecomposedLoc(Loc).first;
      break;
    }
  }

  if (File.isInvalid())
    return;

  auto CommentsInThisFile = Comments.getCommentsInFile(File);
  if (!CommentsInThisFile || CommentsInThisFile->empty() ||
      CommentsInThisFile->rbegin()->second->isAttached())
    return;

  // There is at least one comment not attached to a decl.
  // Maybe it should be attached to one of Decls?
  //
  // Note that this way we pick up not only comments that precede the
  // declaration, but also comments that *follow* the declaration -- thanks to
  // the lookahead in the lexer: we've consumed the semicolon and looked
  // ahead through comments.

  for (const Decl *D : Decls) {
    assert(D);
    if (D->isInvalidDecl())
      continue;

    D = &adjustDeclToTemplate(*D);

    const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr);

    if (DeclLoc.isInvalid() || !DeclLoc.isFileID())
      continue;

    if (DeclRawComments.count(D) > 0)
      continue;

    if (RawComment *const DocComment =
            getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) {
      cacheRawCommentForDecl(*D, *DocComment);
      comments::FullComment *FC = DocComment->parse(*this, PP, D);
      ParsedComments[D->getCanonicalDecl()] = FC;
    }
  }
}

comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC,
                                                    const Decl *D) const {
  auto *ThisDeclInfo = new (*this) comments::DeclInfo;
  ThisDeclInfo->CommentDecl = D;
  ThisDeclInfo->IsFilled = false;
  ThisDeclInfo->fill();
  ThisDeclInfo->CommentDecl = FC->getDecl();
  if (!ThisDeclInfo->TemplateParameters)
    ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters;
  comments::FullComment *CFC =
      new (*this) comments::FullComment(FC->getBlocks(),
                                        ThisDeclInfo);
  return CFC;
}

comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const {
  const RawComment *RC = getRawCommentForDeclNoCache(D);
  return RC ? RC->parse(*this, nullptr, D) : nullptr;
}

comments::FullComment *ASTContext::getCommentForDecl(
    const Decl *D,
    const Preprocessor *PP) const {
  if (!D || D->isInvalidDecl())
    return nullptr;
  D = &adjustDeclToTemplate(*D);

  const Decl *Canonical = D->getCanonicalDecl();
  llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos =
      ParsedComments.find(Canonical);

  if (Pos != ParsedComments.end()) {
    if (Canonical != D) {
      comments::FullComment *FC = Pos->second;
      comments::FullComment *CFC = cloneFullComment(FC, D);
      return CFC;
    }
    return Pos->second;
  }

  const Decl *OriginalDecl = nullptr;

  const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl);
  if (!RC) {
    if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) {
      SmallVector<const NamedDecl*, 8> Overridden;
      const auto *OMD = dyn_cast<ObjCMethodDecl>(D);
      if (OMD && OMD->isPropertyAccessor())
        if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl())
          if (comments::FullComment *FC = getCommentForDecl(PDecl, PP))
            return cloneFullComment(FC, D);
      if (OMD)
        addRedeclaredMethods(OMD, Overridden);
      getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden);
      for (unsigned i = 0, e = Overridden.size(); i < e; i++)
        if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) {
      // Attach any tag type's documentation to its typedef if the latter
      // does not have one of its own.
      QualType QT = TD->getUnderlyingType();
      if (const auto *TT = QT->getAs<TagType>())
        if (const Decl *TD = TT->getDecl())
          if (comments::FullComment *FC = getCommentForDecl(TD, PP))
            return cloneFullComment(FC, D);
    }
    else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) {
      while (IC->getSuperClass()) {
        IC = IC->getSuperClass();
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
      }
    }
    else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) {
      if (const ObjCInterfaceDecl *IC = CD->getClassInterface())
        if (comments::FullComment *FC = getCommentForDecl(IC, PP))
          return cloneFullComment(FC, D);
    }
    else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) {
      if (!(RD = RD->getDefinition()))
        return nullptr;
      // Check non-virtual bases.
      for (const auto &I : RD->bases()) {
        if (I.isVirtual() || (I.getAccessSpecifier() != AS_public))
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(NonVirtualBase = NonVirtualBase->getDefinition()))
            continue;

          if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
      // Check virtual bases.
      for (const auto &I : RD->vbases()) {
        if (I.getAccessSpecifier() != AS_public)
          continue;
        QualType Ty = I.getType();
        if (Ty.isNull())
          continue;
        if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) {
          if (!(VirtualBase = VirtualBase->getDefinition()))
            continue;
          if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP))
            return cloneFullComment(FC, D);
        }
      }
    }
    return nullptr;
  }

  // If the RawComment was attached to another redeclaration of this Decl, we
  // should parse the comment in the context of that other Decl. This is important
  // because comments can contain references to parameter names which can be
  // different across redeclarations.
  if (D != OriginalDecl && OriginalDecl)
    return getCommentForDecl(OriginalDecl, PP);

  comments::FullComment *FC = RC->parse(*this, PP, D);
  ParsedComments[Canonical] = FC;
  return FC;
}

void
ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
                                                   const ASTContext &C,
                                                   TemplateTemplateParmDecl *Parm) {
  ID.AddInteger(Parm->getDepth());
  ID.AddInteger(Parm->getPosition());
  ID.AddBoolean(Parm->isParameterPack());

  TemplateParameterList *Params = Parm->getTemplateParameters();
  ID.AddInteger(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      ID.AddInteger(0);
      ID.AddBoolean(TTP->isParameterPack());
      const TypeConstraint *TC = TTP->getTypeConstraint();
      ID.AddBoolean(TC != nullptr);
      if (TC)
        TC->getImmediatelyDeclaredConstraint()->Profile(ID, C,
                                                        /*Canonical=*/true);
      if (TTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(TTP->getNumExpansionParameters());
      } else
        ID.AddBoolean(false);
      continue;
    }

    if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      ID.AddInteger(1);
      ID.AddBoolean(NTTP->isParameterPack());
      ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
      if (NTTP->isExpandedParameterPack()) {
        ID.AddBoolean(true);
        ID.AddInteger(NTTP->getNumExpansionTypes());
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          QualType T = NTTP->getExpansionType(I);
          ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
        }
      } else
        ID.AddBoolean(false);
      continue;
    }

    auto *TTP = cast<TemplateTemplateParmDecl>(*P);
    ID.AddInteger(2);
    Profile(ID, C, TTP);
  }
  Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause();
  ID.AddBoolean(RequiresClause != nullptr);
  if (RequiresClause)
    RequiresClause->Profile(ID, C, /*Canonical=*/true);
}

static Expr *
canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
                                          QualType ConstrainedType) {
  // This is a bit ugly - we need to form a new immediately-declared
  // constraint that references the new parameter; this would ideally
  // require semantic analysis (e.g. template<C T> struct S {}; - the
  // converted arguments of C<T> could be an argument pack if C is
  // declared as template<typename... T> concept C = ...).
  // We don't have semantic analysis here so we dig deep into the
  // ready-made constraint expr and change the thing manually.
  ConceptSpecializationExpr *CSE;
  if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC))
    CSE = cast<ConceptSpecializationExpr>(Fold->getLHS());
  else
    CSE = cast<ConceptSpecializationExpr>(IDC);
  ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments();
  SmallVector<TemplateArgument, 3> NewConverted;
  NewConverted.reserve(OldConverted.size());
  if (OldConverted.front().getKind() == TemplateArgument::Pack) {
    // The case:
    // template<typename... T> concept C = true;
    // template<C<int> T> struct S; -> constraint is C<{T, int}>
    NewConverted.push_back(ConstrainedType);
    for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
      NewConverted.push_back(Arg);
    TemplateArgument NewPack(NewConverted);

    NewConverted.clear();
    NewConverted.push_back(NewPack);
    assert(OldConverted.size() == 1 &&
           "Template parameter pack should be the last parameter");
  } else {
    assert(OldConverted.front().getKind() == TemplateArgument::Type &&
           "Unexpected first argument kind for immediately-declared "
           "constraint");
    NewConverted.push_back(ConstrainedType);
    for (auto &Arg : OldConverted.drop_front(1))
      NewConverted.push_back(Arg);
  }
  Expr *NewIDC = ConceptSpecializationExpr::Create(
      C, CSE->getNamedConcept(), NewConverted, nullptr,
      CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack());

  if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC))
    NewIDC = new (C) CXXFoldExpr(
        OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC,
        BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr,
        SourceLocation(), /*NumExpansions=*/None);
  return NewIDC;
}

TemplateTemplateParmDecl *
ASTContext::getCanonicalTemplateTemplateParmDecl(
    TemplateTemplateParmDecl *TTP) const {
  // Check if we already have a canonical template template parameter.
  llvm::FoldingSetNodeID ID;
  CanonicalTemplateTemplateParm::Profile(ID, *this, TTP);
  void *InsertPos = nullptr;
  CanonicalTemplateTemplateParm *Canonical
    = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  if (Canonical)
    return Canonical->getParam();

  // Build a canonical template parameter list.
  TemplateParameterList *Params = TTP->getTemplateParameters();
  SmallVector<NamedDecl *, 4> CanonParams;
  CanonParams.reserve(Params->size());
  for (TemplateParameterList::const_iterator P = Params->begin(),
                                             PEnd = Params->end();
       P != PEnd; ++P) {
    if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
      TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this,
          getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
          TTP->getDepth(), TTP->getIndex(), nullptr, false,
          TTP->isParameterPack(), TTP->hasTypeConstraint(),
          TTP->isExpandedParameterPack() ?
              llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None);
      if (const auto *TC = TTP->getTypeConstraint()) {
        QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0);
        Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint(
            *this, TC->getImmediatelyDeclaredConstraint(),
            ParamAsArgument);
        TemplateArgumentListInfo CanonArgsAsWritten;
        if (auto *Args = TC->getTemplateArgsAsWritten())
          for (const auto &ArgLoc : Args->arguments())
            CanonArgsAsWritten.addArgument(
                TemplateArgumentLoc(ArgLoc.getArgument(),
                                    TemplateArgumentLocInfo()));
        NewTTP->setTypeConstraint(
            NestedNameSpecifierLoc(),
            DeclarationNameInfo(TC->getNamedConcept()->getDeclName(),
                                SourceLocation()), /*FoundDecl=*/nullptr,
            // Actually canonicalizing a TemplateArgumentLoc is difficult so we
            // simply omit the ArgsAsWritten
            TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC);
      }
      CanonParams.push_back(NewTTP);
    } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
      QualType T = getCanonicalType(NTTP->getType());
      TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
      NonTypeTemplateParmDecl *Param;
      if (NTTP->isExpandedParameterPack()) {
        SmallVector<QualType, 2> ExpandedTypes;
        SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
        for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
          ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
          ExpandedTInfos.push_back(
              getTrivialTypeSourceInfo(ExpandedTypes.back()));
        }

        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                TInfo,
                                                ExpandedTypes,
                                                ExpandedTInfos);
      } else {
        Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                NTTP->getDepth(),
                                                NTTP->getPosition(), nullptr,
                                                T,
                                                NTTP->isParameterPack(),
                                                TInfo);
      }
      if (AutoType *AT = T->getContainedAutoType()) {
        if (AT->isConstrained()) {
          Param->setPlaceholderTypeConstraint(
              canonicalizeImmediatelyDeclaredConstraint(
                  *this, NTTP->getPlaceholderTypeConstraint(), T));
        }
      }
      CanonParams.push_back(Param);

    } else
      CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
          cast<TemplateTemplateParmDecl>(*P)));
  }

  Expr *CanonRequiresClause = nullptr;
  if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause())
    CanonRequiresClause = RequiresClause;

  TemplateTemplateParmDecl *CanonTTP
    = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
                                       SourceLocation(), TTP->getDepth(),
                                       TTP->getPosition(),
                                       TTP->isParameterPack(),
                                       nullptr,
                                       TemplateParameterList::Create(*this, SourceLocation(),
                                                                     SourceLocation(),
                                                                     CanonParams,
                                                                     SourceLocation(),
                                                                     CanonRequiresClause));

  // Get the new insert position for the node we care about.
  Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
  assert(!Canonical && "Shouldn't be in the map!");
  (void)Canonical;

  // Create the canonical template template parameter entry.
  Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
  CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
  return CanonTTP;
}

TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
  auto Kind = getTargetInfo().getCXXABI().getKind();
  return getLangOpts().CXXABI.getValueOr(Kind);
}

CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
  if (!LangOpts.CPlusPlus) return nullptr;

  switch (getCXXABIKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM: // Same as Itanium at this level
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
    return CreateItaniumCXXABI(*this);
  case TargetCXXABI::Microsoft:
    return CreateMicrosoftCXXABI(*this);
  }
  llvm_unreachable("Invalid CXXABI type!");
}

interp::Context &ASTContext::getInterpContext() {
  if (!InterpContext) {
    InterpContext.reset(new interp::Context(*this));
  }
  return *InterpContext.get();
}

ParentMapContext &ASTContext::getParentMapContext() {
  if (!ParentMapCtx)
    ParentMapCtx.reset(new ParentMapContext(*this));
  return *ParentMapCtx.get();
}

static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
                                           const LangOptions &LOpts) {
  if (LOpts.FakeAddressSpaceMap) {
    // The fake address space map must have a distinct entry for each
    // language-specific address space.
    static const unsigned FakeAddrSpaceMap[] = {
        0,  // Default
        1,  // opencl_global
        3,  // opencl_local
        2,  // opencl_constant
        0,  // opencl_private
        4,  // opencl_generic
        5,  // opencl_global_device
        6,  // opencl_global_host
        7,  // cuda_device
        8,  // cuda_constant
        9,  // cuda_shared
        1,  // sycl_global
        5,  // sycl_global_device
        6,  // sycl_global_host
        3,  // sycl_local
        0,  // sycl_private
        10, // ptr32_sptr
        11, // ptr32_uptr
        12  // ptr64
    };
    return &FakeAddrSpaceMap;
  } else {
    return &T.getAddressSpaceMap();
  }
}

static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
                                          const LangOptions &LangOpts) {
  switch (LangOpts.getAddressSpaceMapMangling()) {
  case LangOptions::ASMM_Target:
    return TI.useAddressSpaceMapMangling();
  case LangOptions::ASMM_On:
    return true;
  case LangOptions::ASMM_Off:
    return false;
  }
  llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything.");
}

ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
                       IdentifierTable &idents, SelectorTable &sels,
                       Builtin::Context &builtins, TranslationUnitKind TUKind)
    : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
      TemplateSpecializationTypes(this_()),
      DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
      SubstTemplateTemplateParmPacks(this_()),
      CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
      NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
      XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
                                        LangOpts.XRayNeverInstrumentFiles,
                                        LangOpts.XRayAttrListFiles, SM)),
      ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
      PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
      BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
      Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
      CompCategories(this_()), LastSDM(nullptr, 0) {
  addTranslationUnitDecl();
}

void ASTContext::cleanup() {
  // Release the DenseMaps associated with DeclContext objects.
  // FIXME: Is this the ideal solution?
  ReleaseDeclContextMaps();

  // Call all of the deallocation functions on all of their targets.
  for (auto &Pair : Deallocations)
    (Pair.first)(Pair.second);
  Deallocations.clear();

  // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
  // because they can contain DenseMaps.
  for (llvm::DenseMap<const ObjCContainerDecl*,
       const ASTRecordLayout*>::iterator
       I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  ObjCLayouts.clear();

  for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
       I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
    // Increment in loop to prevent using deallocated memory.
    if (auto *R = const_cast<ASTRecordLayout *>((I++)->second))
      R->Destroy(*this);
  }
  ASTRecordLayouts.clear();

  for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
                                                       AEnd = DeclAttrs.end();
       A != AEnd; ++A)
    A->second->~AttrVec();
  DeclAttrs.clear();

  for (const auto &Value : ModuleInitializers)
    Value.second->~PerModuleInitializers();
  ModuleInitializers.clear();
}

ASTContext::~ASTContext() { cleanup(); }

void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) {
  TraversalScope = TopLevelDecls;
  getParentMapContext().clear();
}

void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const {
  Deallocations.push_back({Callback, Data});
}

void
ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) {
  ExternalSource = std::move(Source);
}

void ASTContext::PrintStats() const {
  llvm::errs() << "\n*** AST Context Stats:\n";
  llvm::errs() << "  " << Types.size() << " types total.\n";

  unsigned counts[] = {
#define TYPE(Name, Parent) 0,
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"
    0 // Extra
  };

  for (unsigned i = 0, e = Types.size(); i != e; ++i) {
    Type *T = Types[i];
    counts[(unsigned)T->getTypeClass()]++;
  }

  unsigned Idx = 0;
  unsigned TotalBytes = 0;
#define TYPE(Name, Parent)                                               \
  if (counts[Idx])                                                       \
    llvm::errs() << "    " << counts[Idx] << " " << #Name                \
                 << " types, " << sizeof(Name##Type) << " each "         \
                 << "(" << counts[Idx] * sizeof(Name##Type)              \
                 << " bytes)\n";                                         \
  TotalBytes += counts[Idx] * sizeof(Name##Type);                        \
  ++Idx;
#define ABSTRACT_TYPE(Name, Parent)
#include "clang/AST/TypeNodes.inc"

  llvm::errs() << "Total bytes = " << TotalBytes << "\n";

  // Implicit special member functions.
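  // Each ratio below reports how many of the potentially needed implicit
  // special members were actually (lazily) declared.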
  llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
               << NumImplicitDefaultConstructors
               << " implicit default constructors created\n";
  llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
               << NumImplicitCopyConstructors
               << " implicit copy constructors created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
                 << NumImplicitMoveConstructors
                 << " implicit move constructors created\n";
  llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
               << NumImplicitCopyAssignmentOperators
               << " implicit copy assignment operators created\n";
  if (getLangOpts().CPlusPlus)
    llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
                 << NumImplicitMoveAssignmentOperators
                 << " implicit move assignment operators created\n";
  llvm::errs() << NumImplicitDestructorsDeclared << "/"
               << NumImplicitDestructors
               << " implicit destructors created\n";

  if (ExternalSource) {
    llvm::errs() << "\n";
    ExternalSource->PrintStats();
  }

  BumpAlloc.PrintStats();
}

void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M,
                                           bool NotifyListeners) {
  if (NotifyListeners)
    if (auto *Listener = getASTMutationListener())
      Listener->RedefinedHiddenDefinition(ND, M);

  MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M);
}

void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
  auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl()));
  if (It == MergedDefModules.end())
    return;

  auto &Merged = It->second;
  llvm::DenseSet<Module*> Found;
  for (Module *&M : Merged)
    if (!Found.insert(M).second)
      M = nullptr;
  llvm::erase_value(Merged, nullptr);
}

ArrayRef<Module *>
ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) {
  auto MergedIt =
      MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl()));
  if (MergedIt == MergedDefModules.end())
    return None;
  return MergedIt->second;
}

void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
  if (LazyInitializers.empty())
    return;

  auto *Source = Ctx.getExternalSource();
  assert(Source && "lazy initializers but no external source");

  auto LazyInits = std::move(LazyInitializers);
  LazyInitializers.clear();

  for (auto ID : LazyInits)
    Initializers.push_back(Source->GetExternalDecl(ID));

  assert(LazyInitializers.empty() &&
         "GetExternalDecl for lazy module initializer added more inits");
}

void ASTContext::addModuleInitializer(Module *M, Decl *D) {
  // One special case: if we add a module initializer that imports another
  // module, and that module's only initializer is an ImportDecl, simplify.
  if (const auto *ID = dyn_cast<ImportDecl>(D)) {
    auto It = ModuleInitializers.find(ID->getImportedModule());

    // Maybe the ImportDecl does nothing at all. (Common case.)
    if (It == ModuleInitializers.end())
      return;

    // Maybe the ImportDecl only imports another ImportDecl.
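    // If so, record that inner ImportDecl instead, collapsing the chain of
    // import-only initializers by one level.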
    auto &Imported = *It->second;
    if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
      Imported.resolve(*this);
      auto *OnlyDecl = Imported.Initializers.front();
      if (isa<ImportDecl>(OnlyDecl))
        D = OnlyDecl;
    }
  }

  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->Initializers.push_back(D);
}

void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
  auto *&Inits = ModuleInitializers[M];
  if (!Inits)
    Inits = new (*this) PerModuleInitializers;
  Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
                                 IDs.begin(), IDs.end());
}

ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) {
  auto It = ModuleInitializers.find(M);
  if (It == ModuleInitializers.end())
    return None;

  auto *Inits = It->second;
  Inits->resolve(*this);
  return Inits->Initializers;
}

ExternCContextDecl *ASTContext::getExternCContextDecl() const {
  if (!ExternCContext)
    ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());

  return ExternCContext;
}

BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
                                     const IdentifierInfo *II) const {
  auto *BuiltinTemplate =
      BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
  BuiltinTemplate->setImplicit();
  getTranslationUnitDecl()->addDecl(BuiltinTemplate);

  return BuiltinTemplate;
}

BuiltinTemplateDecl *
ASTContext::getMakeIntegerSeqDecl() const {
  if (!MakeIntegerSeqDecl)
    MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq,
                                                  getMakeIntegerSeqName());
  return MakeIntegerSeqDecl;
}

BuiltinTemplateDecl *
ASTContext::getTypePackElementDecl() const {
  if (!TypePackElementDecl)
    TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element,
                                                   getTypePackElementName());
  return TypePackElementDecl;
}

RecordDecl *ASTContext::buildImplicitRecord(StringRef Name,
                                            RecordDecl::TagKind TK) const {
  SourceLocation Loc;
  RecordDecl *NewDecl;
  if (getLangOpts().CPlusPlus)
    NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc,
                                    Loc, &Idents.get(Name));
  else
    NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc,
                                 &Idents.get(Name));
  NewDecl->setImplicit();
  NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit(
      const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default));
  return NewDecl;
}

TypedefDecl *ASTContext::buildImplicitTypedef(QualType T,
                                              StringRef Name) const {
  TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
  TypedefDecl *NewDecl = TypedefDecl::Create(
      const_cast<ASTContext &>(*this), getTranslationUnitDecl(),
      SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo);
  NewDecl->setImplicit();
  return NewDecl;
}

TypedefDecl *ASTContext::getInt128Decl() const {
  if (!Int128Decl)
    Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t");
  return Int128Decl;
}

TypedefDecl *ASTContext::getUInt128Decl() const {
  if (!UInt128Decl)
    UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t");
  return UInt128Decl;
}

void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
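  // Allocate the singleton node for this builtin type, hand it back as a
  // canonical QualType, and register it so the context keeps track of it.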
  auto *Ty = new (*this, TypeAlignment) BuiltinType(K);
  R = CanQualType::CreateUnsafe(QualType(Ty, 0));
  Types.push_back(Ty);
}

void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
                                  const TargetInfo *AuxTarget) {
  assert((!this->Target || this->Target == &Target) &&
         "Incorrect target reinitialization");
  assert(VoidTy.isNull() && "Context reinitialized?");

  this->Target = &Target;
  this->AuxTarget = AuxTarget;

  ABI.reset(createCXXABI(Target));
  AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
  AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts);

  // C99 6.2.5p19.
  InitBuiltinType(VoidTy, BuiltinType::Void);

  // C99 6.2.5p2.
  InitBuiltinType(BoolTy, BuiltinType::Bool);
  // C99 6.2.5p3.
  if (LangOpts.CharIsSigned)
    InitBuiltinType(CharTy, BuiltinType::Char_S);
  else
    InitBuiltinType(CharTy, BuiltinType::Char_U);
  // C99 6.2.5p4.
  InitBuiltinType(SignedCharTy, BuiltinType::SChar);
  InitBuiltinType(ShortTy, BuiltinType::Short);
  InitBuiltinType(IntTy, BuiltinType::Int);
  InitBuiltinType(LongTy, BuiltinType::Long);
  InitBuiltinType(LongLongTy, BuiltinType::LongLong);

  // C99 6.2.5p6.
  InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
  InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
  InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
  InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
  InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);

  // C99 6.2.5p10.
  InitBuiltinType(FloatTy, BuiltinType::Float);
  InitBuiltinType(DoubleTy, BuiltinType::Double);
  InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);

  // GNU extension, __float128 for IEEE quadruple precision
  InitBuiltinType(Float128Ty, BuiltinType::Float128);

  // __ibm128 for IBM extended precision
  InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128);

  // C11 extension ISO/IEC TS 18661-3
  InitBuiltinType(Float16Ty, BuiltinType::Float16);

  // ISO/IEC JTC1 SC22 WG14 N1169 Extension
  InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum);
  InitBuiltinType(AccumTy, BuiltinType::Accum);
  InitBuiltinType(LongAccumTy, BuiltinType::LongAccum);
  InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum);
  InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum);
  InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum);
  InitBuiltinType(ShortFractTy, BuiltinType::ShortFract);
  InitBuiltinType(FractTy, BuiltinType::Fract);
  InitBuiltinType(LongFractTy, BuiltinType::LongFract);
  InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract);
  InitBuiltinType(UnsignedFractTy, BuiltinType::UFract);
  InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract);
  InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum);
  InitBuiltinType(SatAccumTy, BuiltinType::SatAccum);
  InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum);
  InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum);
  InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum);
  InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum);
  InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract);
  InitBuiltinType(SatFractTy, BuiltinType::SatFract);
  InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract);
  InitBuiltinType(SatUnsignedShortFractTy, BuiltinType::SatUShortFract);
  InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract);
  InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract);

  // GNU extension, 128-bit integers.
  InitBuiltinType(Int128Ty, BuiltinType::Int128);
  InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);

  // C++ 3.9.1p5
  if (TargetInfo::isTypeSigned(Target.getWCharType()))
    InitBuiltinType(WCharTy, BuiltinType::WChar_S);
  else // -fshort-wchar makes wchar_t be unsigned.
    InitBuiltinType(WCharTy, BuiltinType::WChar_U);
  if (LangOpts.CPlusPlus && LangOpts.WChar)
    WideCharTy = WCharTy;
  else {
    // C99 (or C++ using -fno-wchar).
    WideCharTy = getFromTargetType(Target.getWCharType());
  }

  WIntTy = getFromTargetType(Target.getWIntType());

  // C++20 (proposed)
  InitBuiltinType(Char8Ty, BuiltinType::Char8);

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char16Ty, BuiltinType::Char16);
  else // C99
    Char16Ty = getFromTargetType(Target.getChar16Type());

  if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
    InitBuiltinType(Char32Ty, BuiltinType::Char32);
  else // C99
    Char32Ty = getFromTargetType(Target.getChar32Type());

  // Placeholder type for type-dependent expressions whose type is
  // completely unknown. No code should ever check a type against
  // DependentTy and users should never see it; however, it is here to
  // help diagnose failures to properly check for type-dependent
  // expressions.
  InitBuiltinType(DependentTy, BuiltinType::Dependent);

  // Placeholder type for functions.
  InitBuiltinType(OverloadTy, BuiltinType::Overload);

  // Placeholder type for bound members.
  InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);

  // Placeholder type for pseudo-objects.
  InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);

  // "any" type; useful for debugger-like clients.
  InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);

  // Placeholder type for unbridged ARC casts.
  InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);

  // Placeholder type for builtin functions.
  InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn);

  // Placeholder type for OMP array sections.
  if (LangOpts.OpenMP) {
    InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
    InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
    InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
  }
  if (LangOpts.MatrixTypes)
    InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

  // Builtin types for 'id', 'Class', and 'SEL'.
  InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
  InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
  InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);

  if (LangOpts.OpenCL) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/OpenCLImageTypes.def"

    InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler);
    InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
    InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
    InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
    InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);

#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/OpenCLExtensionTypes.def"
  }

  if (Target.hasAArch64SVETypes()) {
#define SVE_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/AArch64SVEACLETypes.def"
  }

  if (Target.getTriple().isPPC64()) {
#define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
#define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \
    InitBuiltinType(Id##Ty, BuiltinType::Id);
#include "clang/Basic/PPCTypes.def"
  }

  if (Target.hasRISCVVTypes()) {
#define RVV_TYPE(Name, Id, SingletonId) \
    InitBuiltinType(SingletonId, BuiltinType::Id);
#include "clang/Basic/RISCVVTypes.def"
  }

  // Builtin type for __objc_yes and __objc_no
  ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
                       SignedCharTy : BoolTy);

  ObjCConstantStringType = QualType();

  ObjCSuperType = QualType();

  // void * type
  if (LangOpts.OpenCLGenericAddressSpace) {
    auto Q = VoidTy.getQualifiers();
    Q.setAddressSpace(LangAS::opencl_generic);
    VoidPtrTy = getPointerType(getCanonicalType(
        getQualifiedType(VoidTy.getUnqualifiedType(), Q)));
  } else {
    VoidPtrTy = getPointerType(VoidTy);
  }

  // nullptr type (C++0x 2.14.7)
  InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);

  // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
  InitBuiltinType(HalfTy, BuiltinType::Half);

  InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16);

  // Builtin type used to help define __builtin_va_list.
  VaListTagDecl = nullptr;

  // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
  if (LangOpts.MicrosoftExt || LangOpts.Borland) {
    MSGuidTagDecl = buildImplicitRecord("_GUID");
    getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
  }
}

DiagnosticsEngine &ASTContext::getDiagnostics() const {
  return SourceMgr.getDiagnostics();
}

AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
  AttrVec *&Result = DeclAttrs[D];
  if (!Result) {
    void *Mem = Allocate(sizeof(AttrVec));
    Result = new (Mem) AttrVec;
  }

  return *Result;
}

/// Erase the attributes corresponding to the given declaration.
void ASTContext::eraseDeclAttrs(const Decl *D) {
  llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
  if (Pos != DeclAttrs.end()) {
    Pos->second->~AttrVec();
    DeclAttrs.erase(Pos);
  }
}

// FIXME: Remove ?
MemberSpecializationInfo *
ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
  assert(Var->isStaticDataMember() && "Not a static data member");
  return getTemplateOrSpecializationInfo(Var)
      .dyn_cast<MemberSpecializationInfo *>();
}

ASTContext::TemplateOrSpecializationInfo
ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) {
  llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos =
      TemplateOrInstantiation.find(Var);
  if (Pos == TemplateOrInstantiation.end())
    return {};

  return Pos->second;
}

void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
                                                TemplateSpecializationKind TSK,
                                                SourceLocation PointOfInstantiation) {
  assert(Inst->isStaticDataMember() && "Not a static data member");
  assert(Tmpl->isStaticDataMember() && "Not a static data member");
  setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo(
                                            Tmpl, TSK, PointOfInstantiation));
}

void
ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst,
                                            TemplateOrSpecializationInfo TSI) {
  assert(!TemplateOrInstantiation[Inst] &&
         "Already noted what the variable was instantiated from");
  TemplateOrInstantiation[Inst] = TSI;
}

NamedDecl *
ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
  auto Pos = InstantiatedFromUsingDecl.find(UUD);
  if (Pos == InstantiatedFromUsingDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
  assert((isa<UsingDecl>(Pattern) ||
          isa<UnresolvedUsingValueDecl>(Pattern) ||
          isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
         "pattern decl is not a using decl");
  assert((isa<UsingDecl>(Inst) ||
          isa<UnresolvedUsingValueDecl>(Inst) ||
          isa<UnresolvedUsingTypenameDecl>(Inst)) &&
         "instantiation did not produce a using decl");
  assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingDecl[Inst] = Pattern;
}

UsingEnumDecl *
ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
  auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
  if (Pos == InstantiatedFromUsingEnumDecl.end())
    return nullptr;

  return Pos->second;
}

void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
                                                  UsingEnumDecl *Pattern) {
  assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingEnumDecl[Inst] = Pattern;
}

UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
  llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
    = InstantiatedFromUsingShadowDecl.find(Inst);
  if (Pos == InstantiatedFromUsingShadowDecl.end())
    return nullptr;

  return Pos->second;
}

void
ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
                                               UsingShadowDecl *Pattern) {
  assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
  InstantiatedFromUsingShadowDecl[Inst] = Pattern;
}

FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
  llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
    = InstantiatedFromUnnamedFieldDecl.find(Field);
  if (Pos == InstantiatedFromUnnamedFieldDecl.end())
InstantiatedFromUnnamedFieldDecl.end()) 1617 return nullptr; 1618 1619 return Pos->second; 1620 } 1621 1622 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1623 FieldDecl *Tmpl) { 1624 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1625 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1626 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1627 "Already noted what unnamed field was instantiated from"); 1628 1629 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1630 } 1631 1632 ASTContext::overridden_cxx_method_iterator 1633 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1634 return overridden_methods(Method).begin(); 1635 } 1636 1637 ASTContext::overridden_cxx_method_iterator 1638 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1639 return overridden_methods(Method).end(); 1640 } 1641 1642 unsigned 1643 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1644 auto Range = overridden_methods(Method); 1645 return Range.end() - Range.begin(); 1646 } 1647 1648 ASTContext::overridden_method_range 1649 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1650 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1651 OverriddenMethods.find(Method->getCanonicalDecl()); 1652 if (Pos == OverriddenMethods.end()) 1653 return overridden_method_range(nullptr, nullptr); 1654 return overridden_method_range(Pos->second.begin(), Pos->second.end()); 1655 } 1656 1657 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1658 const CXXMethodDecl *Overridden) { 1659 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1660 OverriddenMethods[Method].push_back(Overridden); 1661 } 1662 1663 void ASTContext::getOverriddenMethods( 1664 const NamedDecl *D, 1665 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1666 assert(D); 1667 1668 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1669 Overridden.append(overridden_methods_begin(CXXMethod), 1670 overridden_methods_end(CXXMethod)); 1671 return; 1672 } 1673 1674 const auto *Method = dyn_cast<ObjCMethodDecl>(D); 1675 if (!Method) 1676 return; 1677 1678 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1679 Method->getOverriddenMethods(OverDecls); 1680 Overridden.append(OverDecls.begin(), OverDecls.end()); 1681 } 1682 1683 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1684 assert(!Import->getNextLocalImport() && 1685 "Import declaration already in the chain"); 1686 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1687 if (!FirstLocalImport) { 1688 FirstLocalImport = Import; 1689 LastLocalImport = Import; 1690 return; 1691 } 1692 1693 LastLocalImport->setNextLocalImport(Import); 1694 LastLocalImport = Import; 1695 } 1696 1697 //===----------------------------------------------------------------------===// 1698 // Type Sizing and Analysis 1699 //===----------------------------------------------------------------------===// 1700 1701 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1702 /// scalar floating point type. 
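/// For example (an illustrative note added to this listing, not part of the
/// original doxygen comment): BuiltinType::Float maps to
/// llvm::APFloat::IEEEsingle() on essentially all targets, while LongDouble
/// varies per target (x87 double-extended on x86, IEEE quad on AArch64,
/// PPC double-double on some PowerPC configurations).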
1703 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { 1704 switch (T->castAs<BuiltinType>()->getKind()) { 1705 default: 1706 llvm_unreachable("Not a floating point type!"); 1707 case BuiltinType::BFloat16: 1708 return Target->getBFloat16Format(); 1709 case BuiltinType::Float16: 1710 case BuiltinType::Half: 1711 return Target->getHalfFormat(); 1712 case BuiltinType::Float: return Target->getFloatFormat(); 1713 case BuiltinType::Double: return Target->getDoubleFormat(); 1714 case BuiltinType::Ibm128: 1715 return Target->getIbm128Format(); 1716 case BuiltinType::LongDouble: 1717 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1718 return AuxTarget->getLongDoubleFormat(); 1719 return Target->getLongDoubleFormat(); 1720 case BuiltinType::Float128: 1721 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1722 return AuxTarget->getFloat128Format(); 1723 return Target->getFloat128Format(); 1724 } 1725 } 1726 1727 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { 1728 unsigned Align = Target->getCharWidth(); 1729 1730 bool UseAlignAttrOnly = false; 1731 if (unsigned AlignFromAttr = D->getMaxAlignment()) { 1732 Align = AlignFromAttr; 1733 1734 // __attribute__((aligned)) can increase or decrease alignment 1735 // *except* on a struct or struct member, where it only increases 1736 // alignment unless 'packed' is also specified. 1737 // 1738 // It is an error for alignas to decrease alignment, so we can 1739 // ignore that possibility; Sema should diagnose it. 1740 if (isa<FieldDecl>(D)) { 1741 UseAlignAttrOnly = D->hasAttr<PackedAttr>() || 1742 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1743 } else { 1744 UseAlignAttrOnly = true; 1745 } 1746 } 1747 else if (isa<FieldDecl>(D)) 1748 UseAlignAttrOnly = 1749 D->hasAttr<PackedAttr>() || 1750 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1751 1752 // If we're using the align attribute only, just ignore everything 1753 // else about the declaration and its type. 1754 if (UseAlignAttrOnly) { 1755 // do nothing 1756 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) { 1757 QualType T = VD->getType(); 1758 if (const auto *RT = T->getAs<ReferenceType>()) { 1759 if (ForAlignof) 1760 T = RT->getPointeeType(); 1761 else 1762 T = getPointerType(RT->getPointeeType()); 1763 } 1764 QualType BaseT = getBaseElementType(T); 1765 if (T->isFunctionType()) 1766 Align = getTypeInfoImpl(T.getTypePtr()).Align; 1767 else if (!BaseT->isIncompleteType()) { 1768 // Adjust alignments of declarations with array type by the 1769 // large-array alignment on the target. 
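      // Illustrative sketch (assumed target values, not from the original
      // source): with getLargeArrayMinWidth() == 128 and
      // getLargeArrayAlign() == 256, a `char Buf[64]` declaration (512 bits)
      // is raised to 256-bit alignment below, while a `char Buf[8]` keeps the
      // alignment derived from its element type.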
1770 if (const ArrayType *arrayType = getAsArrayType(T)) { 1771 unsigned MinWidth = Target->getLargeArrayMinWidth(); 1772 if (!ForAlignof && MinWidth) { 1773 if (isa<VariableArrayType>(arrayType)) 1774 Align = std::max(Align, Target->getLargeArrayAlign()); 1775 else if (isa<ConstantArrayType>(arrayType) && 1776 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType))) 1777 Align = std::max(Align, Target->getLargeArrayAlign()); 1778 } 1779 } 1780 Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); 1781 if (BaseT.getQualifiers().hasUnaligned()) 1782 Align = Target->getCharWidth(); 1783 if (const auto *VD = dyn_cast<VarDecl>(D)) { 1784 if (VD->hasGlobalStorage() && !ForAlignof) { 1785 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 1786 Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); 1787 } 1788 } 1789 } 1790 1791 // Fields can be subject to extra alignment constraints, like if 1792 // the field is packed, the struct is packed, or the struct has a 1793 // max-field-alignment constraint (#pragma pack). So calculate 1794 // the actual alignment of the field within the struct, and then 1795 // (as we're expected to) constrain that by the alignment of the type. 1796 if (const auto *Field = dyn_cast<FieldDecl>(VD)) { 1797 const RecordDecl *Parent = Field->getParent(); 1798 // We can only produce a sensible answer if the record is valid. 1799 if (!Parent->isInvalidDecl()) { 1800 const ASTRecordLayout &Layout = getASTRecordLayout(Parent); 1801 1802 // Start with the record's overall alignment. 1803 unsigned FieldAlign = toBits(Layout.getAlignment()); 1804 1805 // Use the GCD of that and the offset within the record. 1806 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex()); 1807 if (Offset > 0) { 1808 // Alignment is always a power of 2, so the GCD will be a power of 2, 1809 // which means we get to do this crazy thing instead of Euclid's. 1810 uint64_t LowBitOfOffset = Offset & (~Offset + 1); 1811 if (LowBitOfOffset < FieldAlign) 1812 FieldAlign = static_cast<unsigned>(LowBitOfOffset); 1813 } 1814 1815 Align = std::min(Align, FieldAlign); 1816 } 1817 } 1818 } 1819 1820 // Some targets have a hard limit on the maximum requestable alignment in 1821 // the aligned attribute for static variables. 1822 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute(); 1823 const auto *VD = dyn_cast<VarDecl>(D); 1824 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static) 1825 Align = std::min(Align, MaxAlignedAttr); 1826 1827 return toCharUnitsFromBits(Align); 1828 } 1829 1830 CharUnits ASTContext::getExnObjectAlignment() const { 1831 return toCharUnitsFromBits(Target->getExnObjectAlignment()); 1832 } 1833 1834 // getTypeInfoDataSizeInChars - Return the size of a type, in 1835 // chars. If the type is a record, its data size is returned. This is 1836 // the size of the memcpy that's performed when assigning this type 1837 // using a trivial copy/move assignment operator. 1838 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1839 TypeInfoChars Info = getTypeInfoInChars(T); 1840 1841 // In C++, objects can sometimes be allocated into the tail padding 1842 // of a base-class subobject. We decide whether that's possible 1843 // during class layout, so here we can just trust the layout results.
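  // Illustrative example (assumed Itanium-style layout, not from the original
  // source):
  //   struct Base { int i; char c; };     // sizeof == 8, data size == 5
  //   struct Derived : Base { char d; };  // 'd' may occupy Base's tail padding
  // Reporting 5 chars for Base here keeps a memberwise copy from overwriting
  // members of a derived object that the ABI placed in that tail padding.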
1844 if (getLangOpts().CPlusPlus) { 1845 if (const auto *RT = T->getAs<RecordType>()) { 1846 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1847 Info.Width = layout.getDataSize(); 1848 } 1849 } 1850 1851 return Info; 1852 } 1853 1854 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1855 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1856 TypeInfoChars 1857 static getConstantArrayInfoInChars(const ASTContext &Context, 1858 const ConstantArrayType *CAT) { 1859 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1860 uint64_t Size = CAT->getSize().getZExtValue(); 1861 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1862 (uint64_t)(-1)/Size) && 1863 "Overflow in array type char size evaluation"); 1864 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1865 unsigned Align = EltInfo.Align.getQuantity(); 1866 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1867 Context.getTargetInfo().getPointerWidth(0) == 64) 1868 Width = llvm::alignTo(Width, Align); 1869 return TypeInfoChars(CharUnits::fromQuantity(Width), 1870 CharUnits::fromQuantity(Align), 1871 EltInfo.AlignRequirement); 1872 } 1873 1874 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1875 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1876 return getConstantArrayInfoInChars(*this, CAT); 1877 TypeInfo Info = getTypeInfo(T); 1878 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1879 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1880 } 1881 1882 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1883 return getTypeInfoInChars(T.getTypePtr()); 1884 } 1885 1886 bool ASTContext::isAlignmentRequired(const Type *T) const { 1887 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1888 } 1889 1890 bool ASTContext::isAlignmentRequired(QualType T) const { 1891 return isAlignmentRequired(T.getTypePtr()); 1892 } 1893 1894 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1895 bool NeedsPreferredAlignment) const { 1896 // An alignment on a typedef overrides anything else. 1897 if (const auto *TT = T->getAs<TypedefType>()) 1898 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1899 return Align; 1900 1901 // If we have an (array of) complete type, we're done. 1902 T = getBaseElementType(T); 1903 if (!T->isIncompleteType()) 1904 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1905 1906 // If we had an array type, its element type might be a typedef 1907 // type with an alignment attribute. 1908 if (const auto *TT = T->getAs<TypedefType>()) 1909 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1910 return Align; 1911 1912 // Otherwise, see if the declaration of the type had an attribute. 1913 if (const auto *TT = T->getAs<TagType>()) 1914 return TT->getDecl()->getMaxAlignment(); 1915 1916 return 0; 1917 } 1918 1919 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1920 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1921 if (I != MemoizedTypeInfo.end()) 1922 return I->second; 1923 1924 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1925 TypeInfo TI = getTypeInfoImpl(T); 1926 MemoizedTypeInfo[T] = TI; 1927 return TI; 1928 } 1929 1930 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1931 /// method does not work on incomplete types. 
1932 /// 1933 /// FIXME: Pointers into different addr spaces could have different sizes and 1934 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1935 /// should take a QualType, &c. 1936 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1937 uint64_t Width = 0; 1938 unsigned Align = 8; 1939 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1940 unsigned AS = 0; 1941 switch (T->getTypeClass()) { 1942 #define TYPE(Class, Base) 1943 #define ABSTRACT_TYPE(Class, Base) 1944 #define NON_CANONICAL_TYPE(Class, Base) 1945 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1946 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1947 case Type::Class: \ 1948 assert(!T->isDependentType() && "should not see dependent types here"); \ 1949 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1950 #include "clang/AST/TypeNodes.inc" 1951 llvm_unreachable("Should not see dependent types"); 1952 1953 case Type::FunctionNoProto: 1954 case Type::FunctionProto: 1955 // GCC extension: alignof(function) = 32 bits 1956 Width = 0; 1957 Align = 32; 1958 break; 1959 1960 case Type::IncompleteArray: 1961 case Type::VariableArray: 1962 case Type::ConstantArray: { 1963 // Model non-constant sized arrays as size zero, but track the alignment. 1964 uint64_t Size = 0; 1965 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1966 Size = CAT->getSize().getZExtValue(); 1967 1968 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1969 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1970 "Overflow in array type bit size evaluation"); 1971 Width = EltInfo.Width * Size; 1972 Align = EltInfo.Align; 1973 AlignRequirement = EltInfo.AlignRequirement; 1974 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1975 getTargetInfo().getPointerWidth(0) == 64) 1976 Width = llvm::alignTo(Width, Align); 1977 break; 1978 } 1979 1980 case Type::ExtVector: 1981 case Type::Vector: { 1982 const auto *VT = cast<VectorType>(T); 1983 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1984 Width = EltInfo.Width * VT->getNumElements(); 1985 Align = Width; 1986 // If the alignment is not a power of 2, round up to the next power of 2. 1987 // This happens for non-power-of-2 length vectors. 1988 if (Align & (Align-1)) { 1989 Align = llvm::NextPowerOf2(Align); 1990 Width = llvm::alignTo(Width, Align); 1991 } 1992 // Adjust the alignment based on the target max. 1993 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1994 if (TargetVectorAlign && TargetVectorAlign < Align) 1995 Align = TargetVectorAlign; 1996 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 1997 // Adjust the alignment for fixed-length SVE vectors. This is important 1998 // for non-power-of-2 vector lengths. 1999 Align = 128; 2000 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 2001 // Adjust the alignment for fixed-length SVE predicates. 2002 Align = 16; 2003 break; 2004 } 2005 2006 case Type::ConstantMatrix: { 2007 const auto *MT = cast<ConstantMatrixType>(T); 2008 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 2009 // The internal layout of a matrix value is implementation defined. 2010 // Initially be ABI compatible with arrays with respect to alignment and 2011 // size. 
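    // Example (illustrative, not from the original source): a 4x4 matrix of
    // float gets Width = 32 * 4 * 4 = 512 bits and Align = 32 bits, i.e. the
    // same size and alignment as `float [16]`.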
2012 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 2013 Align = ElementInfo.Align; 2014 break; 2015 } 2016 2017 case Type::Builtin: 2018 switch (cast<BuiltinType>(T)->getKind()) { 2019 default: llvm_unreachable("Unknown builtin type!"); 2020 case BuiltinType::Void: 2021 // GCC extension: alignof(void) = 8 bits. 2022 Width = 0; 2023 Align = 8; 2024 break; 2025 case BuiltinType::Bool: 2026 Width = Target->getBoolWidth(); 2027 Align = Target->getBoolAlign(); 2028 break; 2029 case BuiltinType::Char_S: 2030 case BuiltinType::Char_U: 2031 case BuiltinType::UChar: 2032 case BuiltinType::SChar: 2033 case BuiltinType::Char8: 2034 Width = Target->getCharWidth(); 2035 Align = Target->getCharAlign(); 2036 break; 2037 case BuiltinType::WChar_S: 2038 case BuiltinType::WChar_U: 2039 Width = Target->getWCharWidth(); 2040 Align = Target->getWCharAlign(); 2041 break; 2042 case BuiltinType::Char16: 2043 Width = Target->getChar16Width(); 2044 Align = Target->getChar16Align(); 2045 break; 2046 case BuiltinType::Char32: 2047 Width = Target->getChar32Width(); 2048 Align = Target->getChar32Align(); 2049 break; 2050 case BuiltinType::UShort: 2051 case BuiltinType::Short: 2052 Width = Target->getShortWidth(); 2053 Align = Target->getShortAlign(); 2054 break; 2055 case BuiltinType::UInt: 2056 case BuiltinType::Int: 2057 Width = Target->getIntWidth(); 2058 Align = Target->getIntAlign(); 2059 break; 2060 case BuiltinType::ULong: 2061 case BuiltinType::Long: 2062 Width = Target->getLongWidth(); 2063 Align = Target->getLongAlign(); 2064 break; 2065 case BuiltinType::ULongLong: 2066 case BuiltinType::LongLong: 2067 Width = Target->getLongLongWidth(); 2068 Align = Target->getLongLongAlign(); 2069 break; 2070 case BuiltinType::Int128: 2071 case BuiltinType::UInt128: 2072 Width = 128; 2073 Align = 128; // int128_t is 128-bit aligned on all targets. 
2074 break; 2075 case BuiltinType::ShortAccum: 2076 case BuiltinType::UShortAccum: 2077 case BuiltinType::SatShortAccum: 2078 case BuiltinType::SatUShortAccum: 2079 Width = Target->getShortAccumWidth(); 2080 Align = Target->getShortAccumAlign(); 2081 break; 2082 case BuiltinType::Accum: 2083 case BuiltinType::UAccum: 2084 case BuiltinType::SatAccum: 2085 case BuiltinType::SatUAccum: 2086 Width = Target->getAccumWidth(); 2087 Align = Target->getAccumAlign(); 2088 break; 2089 case BuiltinType::LongAccum: 2090 case BuiltinType::ULongAccum: 2091 case BuiltinType::SatLongAccum: 2092 case BuiltinType::SatULongAccum: 2093 Width = Target->getLongAccumWidth(); 2094 Align = Target->getLongAccumAlign(); 2095 break; 2096 case BuiltinType::ShortFract: 2097 case BuiltinType::UShortFract: 2098 case BuiltinType::SatShortFract: 2099 case BuiltinType::SatUShortFract: 2100 Width = Target->getShortFractWidth(); 2101 Align = Target->getShortFractAlign(); 2102 break; 2103 case BuiltinType::Fract: 2104 case BuiltinType::UFract: 2105 case BuiltinType::SatFract: 2106 case BuiltinType::SatUFract: 2107 Width = Target->getFractWidth(); 2108 Align = Target->getFractAlign(); 2109 break; 2110 case BuiltinType::LongFract: 2111 case BuiltinType::ULongFract: 2112 case BuiltinType::SatLongFract: 2113 case BuiltinType::SatULongFract: 2114 Width = Target->getLongFractWidth(); 2115 Align = Target->getLongFractAlign(); 2116 break; 2117 case BuiltinType::BFloat16: 2118 Width = Target->getBFloat16Width(); 2119 Align = Target->getBFloat16Align(); 2120 break; 2121 case BuiltinType::Float16: 2122 case BuiltinType::Half: 2123 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2124 !getLangOpts().OpenMPIsDevice) { 2125 Width = Target->getHalfWidth(); 2126 Align = Target->getHalfAlign(); 2127 } else { 2128 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2129 "Expected OpenMP device compilation."); 2130 Width = AuxTarget->getHalfWidth(); 2131 Align = AuxTarget->getHalfAlign(); 2132 } 2133 break; 2134 case BuiltinType::Float: 2135 Width = Target->getFloatWidth(); 2136 Align = Target->getFloatAlign(); 2137 break; 2138 case BuiltinType::Double: 2139 Width = Target->getDoubleWidth(); 2140 Align = Target->getDoubleAlign(); 2141 break; 2142 case BuiltinType::Ibm128: 2143 Width = Target->getIbm128Width(); 2144 Align = Target->getIbm128Align(); 2145 break; 2146 case BuiltinType::LongDouble: 2147 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2148 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2149 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2150 Width = AuxTarget->getLongDoubleWidth(); 2151 Align = AuxTarget->getLongDoubleAlign(); 2152 } else { 2153 Width = Target->getLongDoubleWidth(); 2154 Align = Target->getLongDoubleAlign(); 2155 } 2156 break; 2157 case BuiltinType::Float128: 2158 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2159 !getLangOpts().OpenMPIsDevice) { 2160 Width = Target->getFloat128Width(); 2161 Align = Target->getFloat128Align(); 2162 } else { 2163 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2164 "Expected OpenMP device compilation."); 2165 Width = AuxTarget->getFloat128Width(); 2166 Align = AuxTarget->getFloat128Align(); 2167 } 2168 break; 2169 case BuiltinType::NullPtr: 2170 Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) 2171 Align = Target->getPointerAlign(0); // == sizeof(void*) 2172 break; 2173 case BuiltinType::ObjCId: 2174 case BuiltinType::ObjCClass: 2175 case BuiltinType::ObjCSel: 2176 
Width = Target->getPointerWidth(0); 2177 Align = Target->getPointerAlign(0); 2178 break; 2179 case BuiltinType::OCLSampler: 2180 case BuiltinType::OCLEvent: 2181 case BuiltinType::OCLClkEvent: 2182 case BuiltinType::OCLQueue: 2183 case BuiltinType::OCLReserveID: 2184 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2185 case BuiltinType::Id: 2186 #include "clang/Basic/OpenCLImageTypes.def" 2187 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2188 case BuiltinType::Id: 2189 #include "clang/Basic/OpenCLExtensionTypes.def" 2190 AS = getTargetAddressSpace( 2191 Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); 2192 Width = Target->getPointerWidth(AS); 2193 Align = Target->getPointerAlign(AS); 2194 break; 2195 // The SVE types are effectively target-specific. The length of an 2196 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2197 // of 128 bits. There is one predicate bit for each vector byte, so the 2198 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2199 // 2200 // Because the length is only known at runtime, we use a dummy value 2201 // of 0 for the static length. The alignment values are those defined 2202 // by the Procedure Call Standard for the Arm Architecture. 2203 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2204 IsSigned, IsFP, IsBF) \ 2205 case BuiltinType::Id: \ 2206 Width = 0; \ 2207 Align = 128; \ 2208 break; 2209 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2210 case BuiltinType::Id: \ 2211 Width = 0; \ 2212 Align = 16; \ 2213 break; 2214 #include "clang/Basic/AArch64SVEACLETypes.def" 2215 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2216 case BuiltinType::Id: \ 2217 Width = Size; \ 2218 Align = Size; \ 2219 break; 2220 #include "clang/Basic/PPCTypes.def" 2221 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2222 IsFP) \ 2223 case BuiltinType::Id: \ 2224 Width = 0; \ 2225 Align = ElBits; \ 2226 break; 2227 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2228 case BuiltinType::Id: \ 2229 Width = 0; \ 2230 Align = 8; \ 2231 break; 2232 #include "clang/Basic/RISCVVTypes.def" 2233 } 2234 break; 2235 case Type::ObjCObjectPointer: 2236 Width = Target->getPointerWidth(0); 2237 Align = Target->getPointerAlign(0); 2238 break; 2239 case Type::BlockPointer: 2240 AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType()); 2241 Width = Target->getPointerWidth(AS); 2242 Align = Target->getPointerAlign(AS); 2243 break; 2244 case Type::LValueReference: 2245 case Type::RValueReference: 2246 // alignof and sizeof should never enter this code path here, so we go 2247 // the pointer route. 2248 AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType()); 2249 Width = Target->getPointerWidth(AS); 2250 Align = Target->getPointerAlign(AS); 2251 break; 2252 case Type::Pointer: 2253 AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); 2254 Width = Target->getPointerWidth(AS); 2255 Align = Target->getPointerAlign(AS); 2256 break; 2257 case Type::MemberPointer: { 2258 const auto *MPT = cast<MemberPointerType>(T); 2259 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2260 Width = MPI.Width; 2261 Align = MPI.Align; 2262 break; 2263 } 2264 case Type::Complex: { 2265 // Complex types have the same alignment as their elements, but twice the 2266 // size. 
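    // Example (illustrative, not from the original source): _Complex double on
    // a typical 64-bit target gets Width = 2 * 64 = 128 bits with Align = 64
    // bits, matching the layout of `double [2]`.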
2267 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2268 Width = EltInfo.Width * 2; 2269 Align = EltInfo.Align; 2270 break; 2271 } 2272 case Type::ObjCObject: 2273 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2274 case Type::Adjusted: 2275 case Type::Decayed: 2276 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2277 case Type::ObjCInterface: { 2278 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2279 if (ObjCI->getDecl()->isInvalidDecl()) { 2280 Width = 8; 2281 Align = 8; 2282 break; 2283 } 2284 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2285 Width = toBits(Layout.getSize()); 2286 Align = toBits(Layout.getAlignment()); 2287 break; 2288 } 2289 case Type::BitInt: { 2290 const auto *EIT = cast<BitIntType>(T); 2291 Align = 2292 std::min(static_cast<unsigned>(std::max( 2293 getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), 2294 Target->getLongLongAlign()); 2295 Width = llvm::alignTo(EIT->getNumBits(), Align); 2296 break; 2297 } 2298 case Type::Record: 2299 case Type::Enum: { 2300 const auto *TT = cast<TagType>(T); 2301 2302 if (TT->getDecl()->isInvalidDecl()) { 2303 Width = 8; 2304 Align = 8; 2305 break; 2306 } 2307 2308 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2309 const EnumDecl *ED = ET->getDecl(); 2310 TypeInfo Info = 2311 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2312 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2313 Info.Align = AttrAlign; 2314 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2315 } 2316 return Info; 2317 } 2318 2319 const auto *RT = cast<RecordType>(TT); 2320 const RecordDecl *RD = RT->getDecl(); 2321 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2322 Width = toBits(Layout.getSize()); 2323 Align = toBits(Layout.getAlignment()); 2324 AlignRequirement = RD->hasAttr<AlignedAttr>() 2325 ? AlignRequirementKind::RequiredByRecord 2326 : AlignRequirementKind::None; 2327 break; 2328 } 2329 2330 case Type::SubstTemplateTypeParm: 2331 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2332 getReplacementType().getTypePtr()); 2333 2334 case Type::Auto: 2335 case Type::DeducedTemplateSpecialization: { 2336 const auto *A = cast<DeducedType>(T); 2337 assert(!A->getDeducedType().isNull() && 2338 "cannot request the size of an undeduced or dependent auto type"); 2339 return getTypeInfo(A->getDeducedType().getTypePtr()); 2340 } 2341 2342 case Type::Paren: 2343 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2344 2345 case Type::MacroQualified: 2346 return getTypeInfo( 2347 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2348 2349 case Type::ObjCTypeParam: 2350 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2351 2352 case Type::Using: 2353 return getTypeInfo(cast<UsingType>(T)->desugar().getTypePtr()); 2354 2355 case Type::Typedef: { 2356 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); 2357 TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); 2358 // If the typedef has an aligned attribute on it, it overrides any computed 2359 // alignment we have. This violates the GCC documentation (which says that 2360 // attribute(aligned) can only round up) but matches its implementation. 
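    // Example (illustrative, not from the original source):
    //   typedef double UnderAlignedDouble __attribute__((aligned(2)));
    // yields Width = 64 but Align = 16 bits here, even though that is lower
    // than double's natural alignment.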
2361 if (unsigned AttrAlign = Typedef->getMaxAlignment()) { 2362 Align = AttrAlign; 2363 AlignRequirement = AlignRequirementKind::RequiredByTypedef; 2364 } else { 2365 Align = Info.Align; 2366 AlignRequirement = Info.AlignRequirement; 2367 } 2368 Width = Info.Width; 2369 break; 2370 } 2371 2372 case Type::Elaborated: 2373 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr()); 2374 2375 case Type::Attributed: 2376 return getTypeInfo( 2377 cast<AttributedType>(T)->getEquivalentType().getTypePtr()); 2378 2379 case Type::Atomic: { 2380 // Start with the base type information. 2381 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType()); 2382 Width = Info.Width; 2383 Align = Info.Align; 2384 2385 if (!Width) { 2386 // An otherwise zero-sized type should still generate an 2387 // atomic operation. 2388 Width = Target->getCharWidth(); 2389 assert(Align); 2390 } else if (Width <= Target->getMaxAtomicPromoteWidth()) { 2391 // If the size of the type doesn't exceed the platform's max 2392 // atomic promotion width, make the size and alignment more 2393 // favorable to atomic operations: 2394 2395 // Round the size up to a power of 2. 2396 if (!llvm::isPowerOf2_64(Width)) 2397 Width = llvm::NextPowerOf2(Width); 2398 2399 // Set the alignment equal to the size. 2400 Align = static_cast<unsigned>(Width); 2401 } 2402 } 2403 break; 2404 2405 case Type::Pipe: 2406 Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global)); 2407 Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global)); 2408 break; 2409 } 2410 2411 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); 2412 return TypeInfo(Width, Align, AlignRequirement); 2413 } 2414 2415 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const { 2416 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T); 2417 if (I != MemoizedUnadjustedAlign.end()) 2418 return I->second; 2419 2420 unsigned UnadjustedAlign; 2421 if (const auto *RT = T->getAs<RecordType>()) { 2422 const RecordDecl *RD = RT->getDecl(); 2423 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2424 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment()); 2425 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) { 2426 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2427 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment()); 2428 } else { 2429 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType()); 2430 } 2431 2432 MemoizedUnadjustedAlign[T] = UnadjustedAlign; 2433 return UnadjustedAlign; 2434 } 2435 2436 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const { 2437 unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign(); 2438 return SimdAlign; 2439 } 2440 2441 /// toCharUnitsFromBits - Convert a size in bits to a size in characters. 2442 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const { 2443 return CharUnits::fromQuantity(BitSize / getCharWidth()); 2444 } 2445 2446 /// toBits - Convert a size in characters to a size in bits. 2447 int64_t ASTContext::toBits(CharUnits CharSize) const { 2448 return CharSize.getQuantity() * getCharWidth(); 2449 } 2450 2451 /// getTypeSizeInChars - Return the size of the specified type, in characters. 2452 /// This method does not work on incomplete types.
2453 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2454 return getTypeInfoInChars(T).Width; 2455 } 2456 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2457 return getTypeInfoInChars(T).Width; 2458 } 2459 2460 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2461 /// characters. This method does not work on incomplete types. 2462 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2463 return toCharUnitsFromBits(getTypeAlign(T)); 2464 } 2465 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2466 return toCharUnitsFromBits(getTypeAlign(T)); 2467 } 2468 2469 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2470 /// type, in characters, before alignment adjustments. This method does 2471 /// not work on incomplete types. 2472 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2473 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2474 } 2475 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2476 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2477 } 2478 2479 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2480 /// type for the current target in bits. This can be different from the ABI 2481 /// alignment in cases where it is beneficial for performance or for preserving 2482 /// backwards compatibility to overalign a data type. (Note: despite the name, 2483 /// the preferred alignment is ABI-impacting, and not an optimization.) 2484 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { 2485 TypeInfo TI = getTypeInfo(T); 2486 unsigned ABIAlign = TI.Align; 2487 2488 T = T->getBaseElementTypeUnsafe(); 2489 2490 // The preferred alignment of member pointers is that of a pointer. 2491 if (T->isMemberPointerType()) 2492 return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); 2493 2494 if (!Target->allowsLargerPreferedTypeAlignment()) 2495 return ABIAlign; 2496 2497 if (const auto *RT = T->getAs<RecordType>()) { 2498 const RecordDecl *RD = RT->getDecl(); 2499 2500 // When used as part of a typedef, or together with a 'packed' attribute, 2501 // the 'aligned' attribute can be used to decrease alignment. Note that the 2502 // 'packed' case is already taken into consideration when computing the 2503 // alignment, so we only need to handle the typedef case here. 2504 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef || 2505 RD->isInvalidDecl()) 2506 return ABIAlign; 2507 2508 unsigned PreferredAlign = static_cast<unsigned>( 2509 toBits(getASTRecordLayout(RD).PreferredAlignment)); 2510 assert(PreferredAlign >= ABIAlign && 2511 "PreferredAlign should be at least as large as ABIAlign."); 2512 return PreferredAlign; 2513 } 2514 2515 // Double (and, for targets supporting AIX `power` alignment, long double) and 2516 // long long should be naturally aligned (despite requiring less alignment) if 2517 // possible. 2518 if (const auto *CT = T->getAs<ComplexType>()) 2519 T = CT->getElementType().getTypePtr(); 2520 if (const auto *ET = T->getAs<EnumType>()) 2521 T = ET->getDecl()->getIntegerType().getTypePtr(); 2522 if (T->isSpecificBuiltinType(BuiltinType::Double) || 2523 T->isSpecificBuiltinType(BuiltinType::LongLong) || 2524 T->isSpecificBuiltinType(BuiltinType::ULongLong) || 2525 (T->isSpecificBuiltinType(BuiltinType::LongDouble) && 2526 Target->defaultsToAIXPowerAlignment())) 2527 // Don't increase the alignment if an alignment attribute was specified on a 2528 // typedef declaration.
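  // Illustrative case (assumed i386 System V values, not from the original
  // source): `double` has a 32-bit ABI alignment but a 64-bit preferred
  // alignment, so getTypeSize(T) == 64 wins below; if the alignment instead
  // came from an aligned attribute on a typedef, isAlignRequired() is true and
  // the attribute value is returned unchanged.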
2529 if (!TI.isAlignRequired()) 2530 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2531 2532 return ABIAlign; 2533 } 2534 2535 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2536 /// for __attribute__((aligned)) on this target, to be used if no alignment 2537 /// value is specified. 2538 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2539 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2540 } 2541 2542 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2543 /// to a global variable of the specified type. 2544 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2545 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2546 return std::max(getPreferredTypeAlign(T), 2547 getTargetInfo().getMinGlobalAlign(TypeSize)); 2548 } 2549 2550 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2551 /// should be given to a global variable of the specified type. 2552 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2553 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2554 } 2555 2556 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2557 CharUnits Offset = CharUnits::Zero(); 2558 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2559 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2560 Offset += Layout->getBaseClassOffset(Base); 2561 Layout = &getASTRecordLayout(Base); 2562 } 2563 return Offset; 2564 } 2565 2566 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2567 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2568 CharUnits ThisAdjustment = CharUnits::Zero(); 2569 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2570 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2571 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2572 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2573 const CXXRecordDecl *Base = RD; 2574 const CXXRecordDecl *Derived = Path[I]; 2575 if (DerivedMember) 2576 std::swap(Base, Derived); 2577 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2578 RD = Path[I]; 2579 } 2580 if (DerivedMember) 2581 ThisAdjustment = -ThisAdjustment; 2582 return ThisAdjustment; 2583 } 2584 2585 /// DeepCollectObjCIvars - 2586 /// This routine first collects all declared, but not synthesized, ivars in 2587 /// super class and then collects all ivars, including those synthesized for 2588 /// current class. This routine is used for implementation of current class 2589 /// when all ivars, declared and synthesized are known. 2590 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2591 bool leafClass, 2592 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2593 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2594 DeepCollectObjCIvars(SuperClass, false, Ivars); 2595 if (!leafClass) { 2596 for (const auto *I : OI->ivars()) 2597 Ivars.push_back(I); 2598 } else { 2599 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2600 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2601 Iv= Iv->getNextIvar()) 2602 Ivars.push_back(Iv); 2603 } 2604 } 2605 2606 /// CollectInheritedProtocols - Collect all protocols in current class and 2607 /// those inherited by it. 
2608 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2609 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2610 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2611 // We can use protocol_iterator here instead of 2612 // all_referenced_protocol_iterator since we are walking all categories. 2613 for (auto *Proto : OI->all_referenced_protocols()) { 2614 CollectInheritedProtocols(Proto, Protocols); 2615 } 2616 2617 // Categories of this Interface. 2618 for (const auto *Cat : OI->visible_categories()) 2619 CollectInheritedProtocols(Cat, Protocols); 2620 2621 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2622 while (SD) { 2623 CollectInheritedProtocols(SD, Protocols); 2624 SD = SD->getSuperClass(); 2625 } 2626 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2627 for (auto *Proto : OC->protocols()) { 2628 CollectInheritedProtocols(Proto, Protocols); 2629 } 2630 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2631 // Insert the protocol. 2632 if (!Protocols.insert( 2633 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2634 return; 2635 2636 for (auto *Proto : OP->protocols()) 2637 CollectInheritedProtocols(Proto, Protocols); 2638 } 2639 } 2640 2641 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2642 const RecordDecl *RD) { 2643 assert(RD->isUnion() && "Must be union type"); 2644 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2645 2646 for (const auto *Field : RD->fields()) { 2647 if (!Context.hasUniqueObjectRepresentations(Field->getType())) 2648 return false; 2649 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2650 if (FieldSize != UnionSize) 2651 return false; 2652 } 2653 return !RD->field_empty(); 2654 } 2655 2656 static int64_t getSubobjectOffset(const FieldDecl *Field, 2657 const ASTContext &Context, 2658 const clang::ASTRecordLayout & /*Layout*/) { 2659 return Context.getFieldOffset(Field); 2660 } 2661 2662 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2663 const ASTContext &Context, 2664 const clang::ASTRecordLayout &Layout) { 2665 return Context.toBits(Layout.getBaseClassOffset(RD)); 2666 } 2667 2668 static llvm::Optional<int64_t> 2669 structHasUniqueObjectRepresentations(const ASTContext &Context, 2670 const RecordDecl *RD); 2671 2672 static llvm::Optional<int64_t> 2673 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) { 2674 if (Field->getType()->isRecordType()) { 2675 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2676 if (!RD->isUnion()) 2677 return structHasUniqueObjectRepresentations(Context, RD); 2678 } 2679 if (!Field->getType()->isReferenceType() && 2680 !Context.hasUniqueObjectRepresentations(Field->getType())) 2681 return llvm::None; 2682 2683 int64_t FieldSizeInBits = 2684 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2685 if (Field->isBitField()) { 2686 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2687 if (BitfieldSize > FieldSizeInBits) 2688 return llvm::None; 2689 FieldSizeInBits = BitfieldSize; 2690 } 2691 return FieldSizeInBits; 2692 } 2693 2694 static llvm::Optional<int64_t> 2695 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) { 2696 return structHasUniqueObjectRepresentations(Context, RD); 2697 } 2698 2699 template <typename RangeT> 2700 static llvm::Optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2701 const RangeT &Subobjects, int64_t CurOffsetInBits, 2702 const ASTContext &Context, const 
clang::ASTRecordLayout &Layout) { 2703 for (const auto *Subobject : Subobjects) { 2704 llvm::Optional<int64_t> SizeInBits = 2705 getSubobjectSizeInBits(Subobject, Context); 2706 if (!SizeInBits) 2707 return llvm::None; 2708 if (*SizeInBits != 0) { 2709 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2710 if (Offset != CurOffsetInBits) 2711 return llvm::None; 2712 CurOffsetInBits += *SizeInBits; 2713 } 2714 } 2715 return CurOffsetInBits; 2716 } 2717 2718 static llvm::Optional<int64_t> 2719 structHasUniqueObjectRepresentations(const ASTContext &Context, 2720 const RecordDecl *RD) { 2721 assert(!RD->isUnion() && "Must be struct/class type"); 2722 const auto &Layout = Context.getASTRecordLayout(RD); 2723 2724 int64_t CurOffsetInBits = 0; 2725 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2726 if (ClassDecl->isDynamicClass()) 2727 return llvm::None; 2728 2729 SmallVector<CXXRecordDecl *, 4> Bases; 2730 for (const auto &Base : ClassDecl->bases()) { 2731 // Empty types can be inherited from, and non-empty types can potentially 2732 // have tail padding, so just make sure there isn't an error. 2733 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2734 } 2735 2736 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2737 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2738 }); 2739 2740 llvm::Optional<int64_t> OffsetAfterBases = 2741 structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits, 2742 Context, Layout); 2743 if (!OffsetAfterBases) 2744 return llvm::None; 2745 CurOffsetInBits = *OffsetAfterBases; 2746 } 2747 2748 llvm::Optional<int64_t> OffsetAfterFields = 2749 structSubobjectsHaveUniqueObjectRepresentations( 2750 RD->fields(), CurOffsetInBits, Context, Layout); 2751 if (!OffsetAfterFields) 2752 return llvm::None; 2753 CurOffsetInBits = *OffsetAfterFields; 2754 2755 return CurOffsetInBits; 2756 } 2757 2758 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { 2759 // C++17 [meta.unary.prop]: 2760 // The predicate condition for a template specialization 2761 // has_unique_object_representations<T> shall be 2762 // satisfied if and only if: 2763 // (9.1) - T is trivially copyable, and 2764 // (9.2) - any two objects of type T with the same value have the same 2765 // object representation, where two objects 2766 // of array or non-union class type are considered to have the same value 2767 // if their respective sequences of 2768 // direct subobjects have the same values, and two objects of union type 2769 // are considered to have the same 2770 // value if they have the same active member and the corresponding members 2771 // have the same value. 2772 // The set of scalar types for which this condition holds is 2773 // implementation-defined. [ Note: If a type has padding 2774 // bits, the condition does not hold; otherwise, the condition holds true 2775 // for unsigned integral types. -- end note ] 2776 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2777 2778 // Arrays are unique only if their element type is unique. 2779 if (Ty->isArrayType()) 2780 return hasUniqueObjectRepresentations(getBaseElementType(Ty)); 2781 2782 // (9.1) - T is trivially copyable... 2783 if (!Ty.isTriviallyCopyableType(*this)) 2784 return false; 2785 2786 // All integrals and enums are unique. 2787 if (Ty->isIntegralOrEnumerationType()) 2788 return true; 2789 2790 // All other pointers are unique. 
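  // Illustrative outcomes of the record-type check further below (not from the
  // original source):
  //   struct A { int i; char c; };              // 3 bytes tail padding -> false
  //   struct B { int i; unsigned j; };          // no padding bits      -> true
  //   struct C { char c; char pad[3]; int i; }; // explicit pad bytes   -> true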
2791 if (Ty->isPointerType()) 2792 return true; 2793 2794 if (Ty->isMemberPointerType()) { 2795 const auto *MPT = Ty->getAs<MemberPointerType>(); 2796 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2797 } 2798 2799 if (Ty->isRecordType()) { 2800 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2801 2802 if (Record->isInvalidDecl()) 2803 return false; 2804 2805 if (Record->isUnion()) 2806 return unionHasUniqueObjectRepresentations(*this, Record); 2807 2808 Optional<int64_t> StructSize = 2809 structHasUniqueObjectRepresentations(*this, Record); 2810 2811 return StructSize && 2812 StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty)); 2813 } 2814 2815 // FIXME: More cases to handle here (list by rsmith): 2816 // vectors (careful about, eg, vector of 3 foo) 2817 // _Complex int and friends 2818 // _Atomic T 2819 // Obj-C block pointers 2820 // Obj-C object pointers 2821 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2822 // clk_event_t, queue_t, reserve_id_t) 2823 // There're also Obj-C class types and the Obj-C selector type, but I think it 2824 // makes sense for those to return false here. 2825 2826 return false; 2827 } 2828 2829 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2830 unsigned count = 0; 2831 // Count ivars declared in class extension. 2832 for (const auto *Ext : OI->known_extensions()) 2833 count += Ext->ivar_size(); 2834 2835 // Count ivar defined in this class's implementation. This 2836 // includes synthesized ivars. 2837 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2838 count += ImplDecl->ivar_size(); 2839 2840 return count; 2841 } 2842 2843 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2844 if (!E) 2845 return false; 2846 2847 // nullptr_t is always treated as null. 2848 if (E->getType()->isNullPtrType()) return true; 2849 2850 if (E->getType()->isAnyPointerType() && 2851 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2852 Expr::NPC_ValueDependentIsNull)) 2853 return true; 2854 2855 // Unfortunately, __null has type 'int'. 2856 if (isa<GNUNullExpr>(E)) return true; 2857 2858 return false; 2859 } 2860 2861 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2862 /// exists. 2863 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2864 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2865 I = ObjCImpls.find(D); 2866 if (I != ObjCImpls.end()) 2867 return cast<ObjCImplementationDecl>(I->second); 2868 return nullptr; 2869 } 2870 2871 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2872 /// exists. 2873 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2874 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2875 I = ObjCImpls.find(D); 2876 if (I != ObjCImpls.end()) 2877 return cast<ObjCCategoryImplDecl>(I->second); 2878 return nullptr; 2879 } 2880 2881 /// Set the implementation of ObjCInterfaceDecl. 2882 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2883 ObjCImplementationDecl *ImplD) { 2884 assert(IFaceD && ImplD && "Passed null params"); 2885 ObjCImpls[IFaceD] = ImplD; 2886 } 2887 2888 /// Set the implementation of ObjCCategoryDecl. 
2889 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2890 ObjCCategoryImplDecl *ImplD) { 2891 assert(CatD && ImplD && "Passed null params"); 2892 ObjCImpls[CatD] = ImplD; 2893 } 2894 2895 const ObjCMethodDecl * 2896 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2897 return ObjCMethodRedecls.lookup(MD); 2898 } 2899 2900 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2901 const ObjCMethodDecl *Redecl) { 2902 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2903 ObjCMethodRedecls[MD] = Redecl; 2904 } 2905 2906 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2907 const NamedDecl *ND) const { 2908 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2909 return ID; 2910 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2911 return CD->getClassInterface(); 2912 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2913 return IMD->getClassInterface(); 2914 2915 return nullptr; 2916 } 2917 2918 /// Get the copy initialization expression of VarDecl, or nullptr if 2919 /// none exists. 2920 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2921 assert(VD && "Passed null params"); 2922 assert(VD->hasAttr<BlocksAttr>() && 2923 "getBlockVarCopyInits - not __block var"); 2924 auto I = BlockVarCopyInits.find(VD); 2925 if (I != BlockVarCopyInits.end()) 2926 return I->second; 2927 return {nullptr, false}; 2928 } 2929 2930 /// Set the copy initialization expression of a block var decl. 2931 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2932 bool CanThrow) { 2933 assert(VD && CopyExpr && "Passed null params"); 2934 assert(VD->hasAttr<BlocksAttr>() && 2935 "setBlockVarCopyInits - not __block var"); 2936 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2937 } 2938 2939 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2940 unsigned DataSize) const { 2941 if (!DataSize) 2942 DataSize = TypeLoc::getFullDataSizeForType(T); 2943 else 2944 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2945 "incorrect data size provided to CreateTypeSourceInfo!"); 2946 2947 auto *TInfo = 2948 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2949 new (TInfo) TypeSourceInfo(T); 2950 return TInfo; 2951 } 2952 2953 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2954 SourceLocation L) const { 2955 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2956 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2957 return DI; 2958 } 2959 2960 const ASTRecordLayout & 2961 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2962 return getObjCLayout(D, nullptr); 2963 } 2964 2965 const ASTRecordLayout & 2966 ASTContext::getASTObjCImplementationLayout( 2967 const ObjCImplementationDecl *D) const { 2968 return getObjCLayout(D->getClassInterface(), D); 2969 } 2970 2971 //===----------------------------------------------------------------------===// 2972 // Type creation/memoization methods 2973 //===----------------------------------------------------------------------===// 2974 2975 QualType 2976 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2977 unsigned fastQuals = quals.getFastQualifiers(); 2978 quals.removeFastQualifiers(); 2979 2980 // Check if we've already instantiated this type. 
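  // Descriptive note (added): this is the FoldingSet memoization pattern used
  // by most get*Type methods below: profile the prospective node into an ID,
  // probe the set, and only allocate a new node (at insertPos) on a miss, so
  // structurally identical types are uniqued to a single object.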
2981 llvm::FoldingSetNodeID ID; 2982 ExtQuals::Profile(ID, baseType, quals); 2983 void *insertPos = nullptr; 2984 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 2985 assert(eq->getQualifiers() == quals); 2986 return QualType(eq, fastQuals); 2987 } 2988 2989 // If the base type is not canonical, make the appropriate canonical type. 2990 QualType canon; 2991 if (!baseType->isCanonicalUnqualified()) { 2992 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 2993 canonSplit.Quals.addConsistentQualifiers(quals); 2994 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 2995 2996 // Re-find the insert position. 2997 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 2998 } 2999 3000 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 3001 ExtQualNodes.InsertNode(eq, insertPos); 3002 return QualType(eq, fastQuals); 3003 } 3004 3005 QualType ASTContext::getAddrSpaceQualType(QualType T, 3006 LangAS AddressSpace) const { 3007 QualType CanT = getCanonicalType(T); 3008 if (CanT.getAddressSpace() == AddressSpace) 3009 return T; 3010 3011 // If we are composing extended qualifiers together, merge together 3012 // into one ExtQuals node. 3013 QualifierCollector Quals; 3014 const Type *TypeNode = Quals.strip(T); 3015 3016 // If this type already has an address space specified, it cannot get 3017 // another one. 3018 assert(!Quals.hasAddressSpace() && 3019 "Type cannot be in multiple addr spaces!"); 3020 Quals.addAddressSpace(AddressSpace); 3021 3022 return getExtQualType(TypeNode, Quals); 3023 } 3024 3025 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3026 // If the type is not qualified with an address space, just return it 3027 // immediately. 3028 if (!T.hasAddressSpace()) 3029 return T; 3030 3031 // If we are composing extended qualifiers together, merge together 3032 // into one ExtQuals node. 3033 QualifierCollector Quals; 3034 const Type *TypeNode; 3035 3036 while (T.hasAddressSpace()) { 3037 TypeNode = Quals.strip(T); 3038 3039 // If the type no longer has an address space after stripping qualifiers, 3040 // jump out. 3041 if (!QualType(TypeNode, 0).hasAddressSpace()) 3042 break; 3043 3044 // There might be sugar in the way. Strip it and try again. 3045 T = T.getSingleStepDesugaredType(*this); 3046 } 3047 3048 Quals.removeAddressSpace(); 3049 3050 // Removal of the address space can mean there are no longer any 3051 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3052 // or required. 3053 if (Quals.hasNonFastQualifiers()) 3054 return getExtQualType(TypeNode, Quals); 3055 else 3056 return QualType(TypeNode, Quals.getFastQualifiers()); 3057 } 3058 3059 QualType ASTContext::getObjCGCQualType(QualType T, 3060 Qualifiers::GC GCAttr) const { 3061 QualType CanT = getCanonicalType(T); 3062 if (CanT.getObjCGCAttr() == GCAttr) 3063 return T; 3064 3065 if (const auto *ptr = T->getAs<PointerType>()) { 3066 QualType Pointee = ptr->getPointeeType(); 3067 if (Pointee->isAnyPointerType()) { 3068 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3069 return getPointerType(ResultType); 3070 } 3071 } 3072 3073 // If we are composing extended qualifiers together, merge together 3074 // into one ExtQuals node. 3075 QualifierCollector Quals; 3076 const Type *TypeNode = Quals.strip(T); 3077 3078 // If this type already has an ObjCGC specified, it cannot get 3079 // another one. 
3080 assert(!Quals.hasObjCGCAttr() && 3081 "Type cannot have multiple ObjCGCs!"); 3082 Quals.addObjCGCAttr(GCAttr); 3083 3084 return getExtQualType(TypeNode, Quals); 3085 } 3086 3087 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3088 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3089 QualType Pointee = Ptr->getPointeeType(); 3090 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3091 return getPointerType(removeAddrSpaceQualType(Pointee)); 3092 } 3093 } 3094 return T; 3095 } 3096 3097 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3098 FunctionType::ExtInfo Info) { 3099 if (T->getExtInfo() == Info) 3100 return T; 3101 3102 QualType Result; 3103 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3104 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3105 } else { 3106 const auto *FPT = cast<FunctionProtoType>(T); 3107 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3108 EPI.ExtInfo = Info; 3109 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3110 } 3111 3112 return cast<FunctionType>(Result.getTypePtr()); 3113 } 3114 3115 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3116 QualType ResultType) { 3117 FD = FD->getMostRecentDecl(); 3118 while (true) { 3119 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3120 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3121 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3122 if (FunctionDecl *Next = FD->getPreviousDecl()) 3123 FD = Next; 3124 else 3125 break; 3126 } 3127 if (ASTMutationListener *L = getASTMutationListener()) 3128 L->DeducedReturnType(FD, ResultType); 3129 } 3130 3131 /// Get a function type and produce the equivalent function type with the 3132 /// specified exception specification. Type sugar that can be present on a 3133 /// declaration of a function with an exception specification is permitted 3134 /// and preserved. Other type sugar (for instance, typedefs) is not. 3135 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3136 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) { 3137 // Might have some parens. 3138 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3139 return getParenType( 3140 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3141 3142 // Might be wrapped in a macro qualified type. 3143 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3144 return getMacroQualifiedType( 3145 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3146 MQT->getMacroIdentifier()); 3147 3148 // Might have a calling-convention attribute. 3149 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3150 return getAttributedType( 3151 AT->getAttrKind(), 3152 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3153 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3154 3155 // Anything else must be a function type. Rebuild it with the new exception 3156 // specification. 
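  // Example (illustrative, not from the original source): passing the type of
  // `void f() noexcept` together with an EST_None ExceptionSpecInfo produces
  // the plain function type `void ()`, with any ParenType, MacroQualifiedType,
  // or AttributedType sugar handled by the recursive cases above.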
3157 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3158 return getFunctionType( 3159 Proto->getReturnType(), Proto->getParamTypes(), 3160 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3161 } 3162 3163 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3164 QualType U) { 3165 return hasSameType(T, U) || 3166 (getLangOpts().CPlusPlus17 && 3167 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3168 getFunctionTypeWithExceptionSpec(U, EST_None))); 3169 } 3170 3171 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3172 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3173 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3174 SmallVector<QualType, 16> Args(Proto->param_types()); 3175 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3176 Args[i] = removePtrSizeAddrSpace(Args[i]); 3177 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3178 } 3179 3180 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3181 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3182 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3183 } 3184 3185 return T; 3186 } 3187 3188 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3189 return hasSameType(T, U) || 3190 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3191 getFunctionTypeWithoutPtrSizes(U)); 3192 } 3193 3194 void ASTContext::adjustExceptionSpec( 3195 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3196 bool AsWritten) { 3197 // Update the type. 3198 QualType Updated = 3199 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3200 FD->setType(Updated); 3201 3202 if (!AsWritten) 3203 return; 3204 3205 // Update the type in the type source information too. 3206 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3207 // If the type and the type-as-written differ, we may need to update 3208 // the type-as-written too. 3209 if (TSInfo->getType() != FD->getType()) 3210 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3211 3212 // FIXME: When we get proper type location information for exceptions, 3213 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3214 // up the TypeSourceInfo; 3215 assert(TypeLoc::getFullDataSizeForType(Updated) == 3216 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3217 "TypeLoc size mismatch from updating exception specification"); 3218 TSInfo->overrideType(Updated); 3219 } 3220 } 3221 3222 /// getComplexType - Return the uniqued reference to the type for a complex 3223 /// number with the specified element type. 3224 QualType ASTContext::getComplexType(QualType T) const { 3225 // Unique pointers, to guarantee there is only one pointer of a particular 3226 // structure. 3227 llvm::FoldingSetNodeID ID; 3228 ComplexType::Profile(ID, T); 3229 3230 void *InsertPos = nullptr; 3231 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3232 return QualType(CT, 0); 3233 3234 // If the pointee type isn't canonical, this won't be a canonical type either, 3235 // so fill in the canonical type field. 3236 QualType Canonical; 3237 if (!T.isCanonical()) { 3238 Canonical = getComplexType(getCanonicalType(T)); 3239 3240 // Get the new insert position for the node we care about. 
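// The recursive getComplexType call above may have inserted nodes into the folding set and invalidated the insertion hint computed earlier.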
3241 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3242 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3243 } 3244 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3245 Types.push_back(New); 3246 ComplexTypes.InsertNode(New, InsertPos); 3247 return QualType(New, 0); 3248 } 3249 3250 /// getPointerType - Return the uniqued reference to the type for a pointer to 3251 /// the specified type. 3252 QualType ASTContext::getPointerType(QualType T) const { 3253 // Unique pointers, to guarantee there is only one pointer of a particular 3254 // structure. 3255 llvm::FoldingSetNodeID ID; 3256 PointerType::Profile(ID, T); 3257 3258 void *InsertPos = nullptr; 3259 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3260 return QualType(PT, 0); 3261 3262 // If the pointee type isn't canonical, this won't be a canonical type either, 3263 // so fill in the canonical type field. 3264 QualType Canonical; 3265 if (!T.isCanonical()) { 3266 Canonical = getPointerType(getCanonicalType(T)); 3267 3268 // Get the new insert position for the node we care about. 3269 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3270 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3271 } 3272 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3273 Types.push_back(New); 3274 PointerTypes.InsertNode(New, InsertPos); 3275 return QualType(New, 0); 3276 } 3277 3278 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3279 llvm::FoldingSetNodeID ID; 3280 AdjustedType::Profile(ID, Orig, New); 3281 void *InsertPos = nullptr; 3282 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3283 if (AT) 3284 return QualType(AT, 0); 3285 3286 QualType Canonical = getCanonicalType(New); 3287 3288 // Get the new insert position for the node we care about. 3289 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3290 assert(!AT && "Shouldn't be in the map!"); 3291 3292 AT = new (*this, TypeAlignment) 3293 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3294 Types.push_back(AT); 3295 AdjustedTypes.InsertNode(AT, InsertPos); 3296 return QualType(AT, 0); 3297 } 3298 3299 QualType ASTContext::getDecayedType(QualType T) const { 3300 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3301 3302 QualType Decayed; 3303 3304 // C99 6.7.5.3p7: 3305 // A declaration of a parameter as "array of type" shall be 3306 // adjusted to "qualified pointer to type", where the type 3307 // qualifiers (if any) are those specified within the [ and ] of 3308 // the array type derivation. 3309 if (T->isArrayType()) 3310 Decayed = getArrayDecayedType(T); 3311 3312 // C99 6.7.5.3p8: 3313 // A declaration of a parameter as "function returning type" 3314 // shall be adjusted to "pointer to function returning type", as 3315 // in 6.3.2.1. 3316 if (T->isFunctionType()) 3317 Decayed = getPointerType(T); 3318 3319 llvm::FoldingSetNodeID ID; 3320 AdjustedType::Profile(ID, T, Decayed); 3321 void *InsertPos = nullptr; 3322 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3323 if (AT) 3324 return QualType(AT, 0); 3325 3326 QualType Canonical = getCanonicalType(Decayed); 3327 3328 // Get the new insert position for the node we care about. 
3329 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3330 assert(!AT && "Shouldn't be in the map!"); 3331 3332 AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); 3333 Types.push_back(AT); 3334 AdjustedTypes.InsertNode(AT, InsertPos); 3335 return QualType(AT, 0); 3336 } 3337 3338 /// getBlockPointerType - Return the uniqued reference to the type for 3339 /// a pointer to the specified block. 3340 QualType ASTContext::getBlockPointerType(QualType T) const { 3341 assert(T->isFunctionType() && "block of function types only"); 3342 // Unique pointers, to guarantee there is only one block of a particular 3343 // structure. 3344 llvm::FoldingSetNodeID ID; 3345 BlockPointerType::Profile(ID, T); 3346 3347 void *InsertPos = nullptr; 3348 if (BlockPointerType *PT = 3349 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3350 return QualType(PT, 0); 3351 3352 // If the block pointee type isn't canonical, this won't be a canonical 3353 // type either so fill in the canonical type field. 3354 QualType Canonical; 3355 if (!T.isCanonical()) { 3356 Canonical = getBlockPointerType(getCanonicalType(T)); 3357 3358 // Get the new insert position for the node we care about. 3359 BlockPointerType *NewIP = 3360 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3361 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3362 } 3363 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3364 Types.push_back(New); 3365 BlockPointerTypes.InsertNode(New, InsertPos); 3366 return QualType(New, 0); 3367 } 3368 3369 /// getLValueReferenceType - Return the uniqued reference to the type for an 3370 /// lvalue reference to the specified type. 3371 QualType 3372 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3373 assert(getCanonicalType(T) != OverloadTy && 3374 "Unresolved overloaded function type"); 3375 3376 // Unique pointers, to guarantee there is only one pointer of a particular 3377 // structure. 3378 llvm::FoldingSetNodeID ID; 3379 ReferenceType::Profile(ID, T, SpelledAsLValue); 3380 3381 void *InsertPos = nullptr; 3382 if (LValueReferenceType *RT = 3383 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3384 return QualType(RT, 0); 3385 3386 const auto *InnerRef = T->getAs<ReferenceType>(); 3387 3388 // If the referencee type isn't canonical, this won't be a canonical type 3389 // either, so fill in the canonical type field. 3390 QualType Canonical; 3391 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3392 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3393 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3394 3395 // Get the new insert position for the node we care about. 3396 LValueReferenceType *NewIP = 3397 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3398 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3399 } 3400 3401 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 3402 SpelledAsLValue); 3403 Types.push_back(New); 3404 LValueReferenceTypes.InsertNode(New, InsertPos); 3405 3406 return QualType(New, 0); 3407 } 3408 3409 /// getRValueReferenceType - Return the uniqued reference to the type for an 3410 /// rvalue reference to the specified type. 3411 QualType ASTContext::getRValueReferenceType(QualType T) const { 3412 // Unique pointers, to guarantee there is only one pointer of a particular 3413 // structure. 
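// Rvalue references are always profiled with SpelledAsLValue == false.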
3414 llvm::FoldingSetNodeID ID; 3415 ReferenceType::Profile(ID, T, false); 3416 3417 void *InsertPos = nullptr; 3418 if (RValueReferenceType *RT = 3419 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3420 return QualType(RT, 0); 3421 3422 const auto *InnerRef = T->getAs<ReferenceType>(); 3423 3424 // If the referencee type isn't canonical, this won't be a canonical type 3425 // either, so fill in the canonical type field. 3426 QualType Canonical; 3427 if (InnerRef || !T.isCanonical()) { 3428 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3429 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3430 3431 // Get the new insert position for the node we care about. 3432 RValueReferenceType *NewIP = 3433 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3434 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3435 } 3436 3437 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 3438 Types.push_back(New); 3439 RValueReferenceTypes.InsertNode(New, InsertPos); 3440 return QualType(New, 0); 3441 } 3442 3443 /// getMemberPointerType - Return the uniqued reference to the type for a 3444 /// member pointer to the specified type, in the specified class. 3445 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3446 // Unique pointers, to guarantee there is only one pointer of a particular 3447 // structure. 3448 llvm::FoldingSetNodeID ID; 3449 MemberPointerType::Profile(ID, T, Cls); 3450 3451 void *InsertPos = nullptr; 3452 if (MemberPointerType *PT = 3453 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3454 return QualType(PT, 0); 3455 3456 // If the pointee or class type isn't canonical, this won't be a canonical 3457 // type either, so fill in the canonical type field. 3458 QualType Canonical; 3459 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3460 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3461 3462 // Get the new insert position for the node we care about. 3463 MemberPointerType *NewIP = 3464 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3465 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3466 } 3467 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 3468 Types.push_back(New); 3469 MemberPointerTypes.InsertNode(New, InsertPos); 3470 return QualType(New, 0); 3471 } 3472 3473 /// getConstantArrayType - Return the unique reference to the type for an 3474 /// array of the specified element type. 3475 QualType ASTContext::getConstantArrayType(QualType EltTy, 3476 const llvm::APInt &ArySizeIn, 3477 const Expr *SizeExpr, 3478 ArrayType::ArraySizeModifier ASM, 3479 unsigned IndexTypeQuals) const { 3480 assert((EltTy->isDependentType() || 3481 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3482 "Constant array of VLAs is illegal!"); 3483 3484 // We only need the size as part of the type if it's instantiation-dependent. 3485 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3486 SizeExpr = nullptr; 3487 3488 // Convert the array size into a canonical width matching the pointer size for 3489 // the target. 
3490 llvm::APInt ArySize(ArySizeIn); 3491 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3492 3493 llvm::FoldingSetNodeID ID; 3494 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3495 IndexTypeQuals); 3496 3497 void *InsertPos = nullptr; 3498 if (ConstantArrayType *ATP = 3499 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3500 return QualType(ATP, 0); 3501 3502 // If the element type isn't canonical or has qualifiers, or the array bound 3503 // is instantiation-dependent, this won't be a canonical type either, so fill 3504 // in the canonical type field. 3505 QualType Canon; 3506 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3507 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3508 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3509 ASM, IndexTypeQuals); 3510 Canon = getQualifiedType(Canon, canonSplit.Quals); 3511 3512 // Get the new insert position for the node we care about. 3513 ConstantArrayType *NewIP = 3514 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3515 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3516 } 3517 3518 void *Mem = Allocate( 3519 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3520 TypeAlignment); 3521 auto *New = new (Mem) 3522 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3523 ConstantArrayTypes.InsertNode(New, InsertPos); 3524 Types.push_back(New); 3525 return QualType(New, 0); 3526 } 3527 3528 /// getVariableArrayDecayedType - Turns the given type, which may be 3529 /// variably-modified, into the corresponding type with all the known 3530 /// sizes replaced with [*]. 3531 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3532 // Vastly most common case. 3533 if (!type->isVariablyModifiedType()) return type; 3534 3535 QualType result; 3536 3537 SplitQualType split = type.getSplitDesugaredType(); 3538 const Type *ty = split.Ty; 3539 switch (ty->getTypeClass()) { 3540 #define TYPE(Class, Base) 3541 #define ABSTRACT_TYPE(Class, Base) 3542 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3543 #include "clang/AST/TypeNodes.inc" 3544 llvm_unreachable("didn't desugar past all non-canonical types?"); 3545 3546 // These types should never be variably-modified. 3547 case Type::Builtin: 3548 case Type::Complex: 3549 case Type::Vector: 3550 case Type::DependentVector: 3551 case Type::ExtVector: 3552 case Type::DependentSizedExtVector: 3553 case Type::ConstantMatrix: 3554 case Type::DependentSizedMatrix: 3555 case Type::DependentAddressSpace: 3556 case Type::ObjCObject: 3557 case Type::ObjCInterface: 3558 case Type::ObjCObjectPointer: 3559 case Type::Record: 3560 case Type::Enum: 3561 case Type::UnresolvedUsing: 3562 case Type::TypeOfExpr: 3563 case Type::TypeOf: 3564 case Type::Decltype: 3565 case Type::UnaryTransform: 3566 case Type::DependentName: 3567 case Type::InjectedClassName: 3568 case Type::TemplateSpecialization: 3569 case Type::DependentTemplateSpecialization: 3570 case Type::TemplateTypeParm: 3571 case Type::SubstTemplateTypeParmPack: 3572 case Type::Auto: 3573 case Type::DeducedTemplateSpecialization: 3574 case Type::PackExpansion: 3575 case Type::BitInt: 3576 case Type::DependentBitInt: 3577 llvm_unreachable("type should never be variably-modified"); 3578 3579 // These types can be variably-modified but should never need to 3580 // further decay. 
3581 case Type::FunctionNoProto: 3582 case Type::FunctionProto: 3583 case Type::BlockPointer: 3584 case Type::MemberPointer: 3585 case Type::Pipe: 3586 return type; 3587 3588 // These types can be variably-modified. All these modifications 3589 // preserve structure except as noted by comments. 3590 // TODO: if we ever care about optimizing VLAs, there are no-op 3591 // optimizations available here. 3592 case Type::Pointer: 3593 result = getPointerType(getVariableArrayDecayedType( 3594 cast<PointerType>(ty)->getPointeeType())); 3595 break; 3596 3597 case Type::LValueReference: { 3598 const auto *lv = cast<LValueReferenceType>(ty); 3599 result = getLValueReferenceType( 3600 getVariableArrayDecayedType(lv->getPointeeType()), 3601 lv->isSpelledAsLValue()); 3602 break; 3603 } 3604 3605 case Type::RValueReference: { 3606 const auto *lv = cast<RValueReferenceType>(ty); 3607 result = getRValueReferenceType( 3608 getVariableArrayDecayedType(lv->getPointeeType())); 3609 break; 3610 } 3611 3612 case Type::Atomic: { 3613 const auto *at = cast<AtomicType>(ty); 3614 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3615 break; 3616 } 3617 3618 case Type::ConstantArray: { 3619 const auto *cat = cast<ConstantArrayType>(ty); 3620 result = getConstantArrayType( 3621 getVariableArrayDecayedType(cat->getElementType()), 3622 cat->getSize(), 3623 cat->getSizeExpr(), 3624 cat->getSizeModifier(), 3625 cat->getIndexTypeCVRQualifiers()); 3626 break; 3627 } 3628 3629 case Type::DependentSizedArray: { 3630 const auto *dat = cast<DependentSizedArrayType>(ty); 3631 result = getDependentSizedArrayType( 3632 getVariableArrayDecayedType(dat->getElementType()), 3633 dat->getSizeExpr(), 3634 dat->getSizeModifier(), 3635 dat->getIndexTypeCVRQualifiers(), 3636 dat->getBracketsRange()); 3637 break; 3638 } 3639 3640 // Turn incomplete types into [*] types. 3641 case Type::IncompleteArray: { 3642 const auto *iat = cast<IncompleteArrayType>(ty); 3643 result = getVariableArrayType( 3644 getVariableArrayDecayedType(iat->getElementType()), 3645 /*size*/ nullptr, 3646 ArrayType::Normal, 3647 iat->getIndexTypeCVRQualifiers(), 3648 SourceRange()); 3649 break; 3650 } 3651 3652 // Turn VLA types into [*] types. 3653 case Type::VariableArray: { 3654 const auto *vat = cast<VariableArrayType>(ty); 3655 result = getVariableArrayType( 3656 getVariableArrayDecayedType(vat->getElementType()), 3657 /*size*/ nullptr, 3658 ArrayType::Star, 3659 vat->getIndexTypeCVRQualifiers(), 3660 vat->getBracketsRange()); 3661 break; 3662 } 3663 } 3664 3665 // Apply the top-level qualifiers from the original. 3666 return getQualifiedType(result, split.Quals); 3667 } 3668 3669 /// getVariableArrayType - Returns a non-unique reference to the type for a 3670 /// variable array of the specified element type. 3671 QualType ASTContext::getVariableArrayType(QualType EltTy, 3672 Expr *NumElts, 3673 ArrayType::ArraySizeModifier ASM, 3674 unsigned IndexTypeQuals, 3675 SourceRange Brackets) const { 3676 // Since we don't unique expressions, it isn't possible to unique VLA's 3677 // that have an expression provided for their size. 3678 QualType Canon; 3679 3680 // Be sure to pull qualifiers off the element type. 
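// They are re-applied on top of the canonical array type via getQualifiedType below.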
3681 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3682 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3683 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3684 IndexTypeQuals, Brackets); 3685 Canon = getQualifiedType(Canon, canonSplit.Quals); 3686 } 3687 3688 auto *New = new (*this, TypeAlignment) 3689 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3690 3691 VariableArrayTypes.push_back(New); 3692 Types.push_back(New); 3693 return QualType(New, 0); 3694 } 3695 3696 /// getDependentSizedArrayType - Returns a non-unique reference to 3697 /// the type for a dependently-sized array of the specified element 3698 /// type. 3699 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3700 Expr *numElements, 3701 ArrayType::ArraySizeModifier ASM, 3702 unsigned elementTypeQuals, 3703 SourceRange brackets) const { 3704 assert((!numElements || numElements->isTypeDependent() || 3705 numElements->isValueDependent()) && 3706 "Size must be type- or value-dependent!"); 3707 3708 // Dependently-sized array types that do not have a specified number 3709 // of elements will have their sizes deduced from a dependent 3710 // initializer. We do no canonicalization here at all, which is okay 3711 // because they can't be used in most locations. 3712 if (!numElements) { 3713 auto *newType 3714 = new (*this, TypeAlignment) 3715 DependentSizedArrayType(*this, elementType, QualType(), 3716 numElements, ASM, elementTypeQuals, 3717 brackets); 3718 Types.push_back(newType); 3719 return QualType(newType, 0); 3720 } 3721 3722 // Otherwise, we actually build a new type every time, but we 3723 // also build a canonical type. 3724 3725 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3726 3727 void *insertPos = nullptr; 3728 llvm::FoldingSetNodeID ID; 3729 DependentSizedArrayType::Profile(ID, *this, 3730 QualType(canonElementType.Ty, 0), 3731 ASM, elementTypeQuals, numElements); 3732 3733 // Look for an existing type with these properties. 3734 DependentSizedArrayType *canonTy = 3735 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3736 3737 // If we don't have one, build one. 3738 if (!canonTy) { 3739 canonTy = new (*this, TypeAlignment) 3740 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 3741 QualType(), numElements, ASM, elementTypeQuals, 3742 brackets); 3743 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3744 Types.push_back(canonTy); 3745 } 3746 3747 // Apply qualifiers from the element type to the array. 3748 QualType canon = getQualifiedType(QualType(canonTy,0), 3749 canonElementType.Quals); 3750 3751 // If we didn't need extra canonicalization for the element type or the size 3752 // expression, then just use that as our result. 3753 if (QualType(canonElementType.Ty, 0) == elementType && 3754 canonTy->getSizeExpr() == numElements) 3755 return canon; 3756 3757 // Otherwise, we need to build a type which follows the spelling 3758 // of the element type. 
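// The sugared node still records the canonical type computed above, so later canonicalization is a constant-time lookup.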
3759 auto *sugaredType 3760 = new (*this, TypeAlignment) 3761 DependentSizedArrayType(*this, elementType, canon, numElements, 3762 ASM, elementTypeQuals, brackets); 3763 Types.push_back(sugaredType); 3764 return QualType(sugaredType, 0); 3765 } 3766 3767 QualType ASTContext::getIncompleteArrayType(QualType elementType, 3768 ArrayType::ArraySizeModifier ASM, 3769 unsigned elementTypeQuals) const { 3770 llvm::FoldingSetNodeID ID; 3771 IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals); 3772 3773 void *insertPos = nullptr; 3774 if (IncompleteArrayType *iat = 3775 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos)) 3776 return QualType(iat, 0); 3777 3778 // If the element type isn't canonical, this won't be a canonical type 3779 // either, so fill in the canonical type field. We also have to pull 3780 // qualifiers off the element type. 3781 QualType canon; 3782 3783 if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) { 3784 SplitQualType canonSplit = getCanonicalType(elementType).split(); 3785 canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0), 3786 ASM, elementTypeQuals); 3787 canon = getQualifiedType(canon, canonSplit.Quals); 3788 3789 // Get the new insert position for the node we care about. 3790 IncompleteArrayType *existing = 3791 IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3792 assert(!existing && "Shouldn't be in the map!"); (void) existing; 3793 } 3794 3795 auto *newType = new (*this, TypeAlignment) 3796 IncompleteArrayType(elementType, canon, ASM, elementTypeQuals); 3797 3798 IncompleteArrayTypes.InsertNode(newType, insertPos); 3799 Types.push_back(newType); 3800 return QualType(newType, 0); 3801 } 3802 3803 ASTContext::BuiltinVectorTypeInfo 3804 ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const { 3805 #define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS) \ 3806 {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \ 3807 NUMVECTORS}; 3808 3809 #define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS) \ 3810 {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS}; 3811 3812 switch (Ty->getKind()) { 3813 default: 3814 llvm_unreachable("Unsupported builtin vector type"); 3815 case BuiltinType::SveInt8: 3816 return SVE_INT_ELTTY(8, 16, true, 1); 3817 case BuiltinType::SveUint8: 3818 return SVE_INT_ELTTY(8, 16, false, 1); 3819 case BuiltinType::SveInt8x2: 3820 return SVE_INT_ELTTY(8, 16, true, 2); 3821 case BuiltinType::SveUint8x2: 3822 return SVE_INT_ELTTY(8, 16, false, 2); 3823 case BuiltinType::SveInt8x3: 3824 return SVE_INT_ELTTY(8, 16, true, 3); 3825 case BuiltinType::SveUint8x3: 3826 return SVE_INT_ELTTY(8, 16, false, 3); 3827 case BuiltinType::SveInt8x4: 3828 return SVE_INT_ELTTY(8, 16, true, 4); 3829 case BuiltinType::SveUint8x4: 3830 return SVE_INT_ELTTY(8, 16, false, 4); 3831 case BuiltinType::SveInt16: 3832 return SVE_INT_ELTTY(16, 8, true, 1); 3833 case BuiltinType::SveUint16: 3834 return SVE_INT_ELTTY(16, 8, false, 1); 3835 case BuiltinType::SveInt16x2: 3836 return SVE_INT_ELTTY(16, 8, true, 2); 3837 case BuiltinType::SveUint16x2: 3838 return SVE_INT_ELTTY(16, 8, false, 2); 3839 case BuiltinType::SveInt16x3: 3840 return SVE_INT_ELTTY(16, 8, true, 3); 3841 case BuiltinType::SveUint16x3: 3842 return SVE_INT_ELTTY(16, 8, false, 3); 3843 case BuiltinType::SveInt16x4: 3844 return SVE_INT_ELTTY(16, 8, true, 4); 3845 case BuiltinType::SveUint16x4: 3846 return SVE_INT_ELTTY(16, 8, false, 4); 3847 case BuiltinType::SveInt32: 3848 return SVE_INT_ELTTY(32, 4, true, 1); 3849 case 
BuiltinType::SveUint32: 3850 return SVE_INT_ELTTY(32, 4, false, 1); 3851 case BuiltinType::SveInt32x2: 3852 return SVE_INT_ELTTY(32, 4, true, 2); 3853 case BuiltinType::SveUint32x2: 3854 return SVE_INT_ELTTY(32, 4, false, 2); 3855 case BuiltinType::SveInt32x3: 3856 return SVE_INT_ELTTY(32, 4, true, 3); 3857 case BuiltinType::SveUint32x3: 3858 return SVE_INT_ELTTY(32, 4, false, 3); 3859 case BuiltinType::SveInt32x4: 3860 return SVE_INT_ELTTY(32, 4, true, 4); 3861 case BuiltinType::SveUint32x4: 3862 return SVE_INT_ELTTY(32, 4, false, 4); 3863 case BuiltinType::SveInt64: 3864 return SVE_INT_ELTTY(64, 2, true, 1); 3865 case BuiltinType::SveUint64: 3866 return SVE_INT_ELTTY(64, 2, false, 1); 3867 case BuiltinType::SveInt64x2: 3868 return SVE_INT_ELTTY(64, 2, true, 2); 3869 case BuiltinType::SveUint64x2: 3870 return SVE_INT_ELTTY(64, 2, false, 2); 3871 case BuiltinType::SveInt64x3: 3872 return SVE_INT_ELTTY(64, 2, true, 3); 3873 case BuiltinType::SveUint64x3: 3874 return SVE_INT_ELTTY(64, 2, false, 3); 3875 case BuiltinType::SveInt64x4: 3876 return SVE_INT_ELTTY(64, 2, true, 4); 3877 case BuiltinType::SveUint64x4: 3878 return SVE_INT_ELTTY(64, 2, false, 4); 3879 case BuiltinType::SveBool: 3880 return SVE_ELTTY(BoolTy, 16, 1); 3881 case BuiltinType::SveFloat16: 3882 return SVE_ELTTY(HalfTy, 8, 1); 3883 case BuiltinType::SveFloat16x2: 3884 return SVE_ELTTY(HalfTy, 8, 2); 3885 case BuiltinType::SveFloat16x3: 3886 return SVE_ELTTY(HalfTy, 8, 3); 3887 case BuiltinType::SveFloat16x4: 3888 return SVE_ELTTY(HalfTy, 8, 4); 3889 case BuiltinType::SveFloat32: 3890 return SVE_ELTTY(FloatTy, 4, 1); 3891 case BuiltinType::SveFloat32x2: 3892 return SVE_ELTTY(FloatTy, 4, 2); 3893 case BuiltinType::SveFloat32x3: 3894 return SVE_ELTTY(FloatTy, 4, 3); 3895 case BuiltinType::SveFloat32x4: 3896 return SVE_ELTTY(FloatTy, 4, 4); 3897 case BuiltinType::SveFloat64: 3898 return SVE_ELTTY(DoubleTy, 2, 1); 3899 case BuiltinType::SveFloat64x2: 3900 return SVE_ELTTY(DoubleTy, 2, 2); 3901 case BuiltinType::SveFloat64x3: 3902 return SVE_ELTTY(DoubleTy, 2, 3); 3903 case BuiltinType::SveFloat64x4: 3904 return SVE_ELTTY(DoubleTy, 2, 4); 3905 case BuiltinType::SveBFloat16: 3906 return SVE_ELTTY(BFloat16Ty, 8, 1); 3907 case BuiltinType::SveBFloat16x2: 3908 return SVE_ELTTY(BFloat16Ty, 8, 2); 3909 case BuiltinType::SveBFloat16x3: 3910 return SVE_ELTTY(BFloat16Ty, 8, 3); 3911 case BuiltinType::SveBFloat16x4: 3912 return SVE_ELTTY(BFloat16Ty, 8, 4); 3913 #define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \ 3914 IsSigned) \ 3915 case BuiltinType::Id: \ 3916 return {getIntTypeForBitwidth(ElBits, IsSigned), \ 3917 llvm::ElementCount::getScalable(NumEls), NF}; 3918 #define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \ 3919 case BuiltinType::Id: \ 3920 return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \ 3921 llvm::ElementCount::getScalable(NumEls), NF}; 3922 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3923 case BuiltinType::Id: \ 3924 return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1}; 3925 #include "clang/Basic/RISCVVTypes.def" 3926 } 3927 } 3928 3929 /// getScalableVectorType - Return the unique reference to a scalable vector 3930 /// type of the specified element type and size. VectorType must be a built-in 3931 /// type. 
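/// Returns a null QualType if the target provides no matching scalable vector type.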
3932 QualType ASTContext::getScalableVectorType(QualType EltTy, 3933 unsigned NumElts) const { 3934 if (Target->hasAArch64SVETypes()) { 3935 uint64_t EltTySize = getTypeSize(EltTy); 3936 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 3937 IsSigned, IsFP, IsBF) \ 3938 if (!EltTy->isBooleanType() && \ 3939 ((EltTy->hasIntegerRepresentation() && \ 3940 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3941 (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() && \ 3942 IsFP && !IsBF) || \ 3943 (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() && \ 3944 IsBF && !IsFP)) && \ 3945 EltTySize == ElBits && NumElts == NumEls) { \ 3946 return SingletonId; \ 3947 } 3948 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 3949 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3950 return SingletonId; 3951 #include "clang/Basic/AArch64SVEACLETypes.def" 3952 } else if (Target->hasRISCVVTypes()) { 3953 uint64_t EltTySize = getTypeSize(EltTy); 3954 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \ 3955 IsFP) \ 3956 if (!EltTy->isBooleanType() && \ 3957 ((EltTy->hasIntegerRepresentation() && \ 3958 EltTy->hasSignedIntegerRepresentation() == IsSigned) || \ 3959 (EltTy->hasFloatingRepresentation() && IsFP)) && \ 3960 EltTySize == ElBits && NumElts == NumEls) \ 3961 return SingletonId; 3962 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \ 3963 if (EltTy->isBooleanType() && NumElts == NumEls) \ 3964 return SingletonId; 3965 #include "clang/Basic/RISCVVTypes.def" 3966 } 3967 return QualType(); 3968 } 3969 3970 /// getVectorType - Return the unique reference to a vector type of 3971 /// the specified element type and size. VectorType must be a built-in type. 3972 QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts, 3973 VectorType::VectorKind VecKind) const { 3974 assert(vecType->isBuiltinType()); 3975 3976 // Check if we've already instantiated a vector of this type. 3977 llvm::FoldingSetNodeID ID; 3978 VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind); 3979 3980 void *InsertPos = nullptr; 3981 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 3982 return QualType(VTP, 0); 3983 3984 // If the element type isn't canonical, this won't be a canonical type either, 3985 // so fill in the canonical type field. 3986 QualType Canonical; 3987 if (!vecType.isCanonical()) { 3988 Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind); 3989 3990 // Get the new insert position for the node we care about. 
3991 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 3992 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3993 } 3994 auto *New = new (*this, TypeAlignment) 3995 VectorType(vecType, NumElts, Canonical, VecKind); 3996 VectorTypes.InsertNode(New, InsertPos); 3997 Types.push_back(New); 3998 return QualType(New, 0); 3999 } 4000 4001 QualType 4002 ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr, 4003 SourceLocation AttrLoc, 4004 VectorType::VectorKind VecKind) const { 4005 llvm::FoldingSetNodeID ID; 4006 DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr, 4007 VecKind); 4008 void *InsertPos = nullptr; 4009 DependentVectorType *Canon = 4010 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4011 DependentVectorType *New; 4012 4013 if (Canon) { 4014 New = new (*this, TypeAlignment) DependentVectorType( 4015 *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind); 4016 } else { 4017 QualType CanonVecTy = getCanonicalType(VecType); 4018 if (CanonVecTy == VecType) { 4019 New = new (*this, TypeAlignment) DependentVectorType( 4020 *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind); 4021 4022 DependentVectorType *CanonCheck = 4023 DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4024 assert(!CanonCheck && 4025 "Dependent-sized vector_size canonical type broken"); 4026 (void)CanonCheck; 4027 DependentVectorTypes.InsertNode(New, InsertPos); 4028 } else { 4029 QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr, 4030 SourceLocation(), VecKind); 4031 New = new (*this, TypeAlignment) DependentVectorType( 4032 *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind); 4033 } 4034 } 4035 4036 Types.push_back(New); 4037 return QualType(New, 0); 4038 } 4039 4040 /// getExtVectorType - Return the unique reference to an extended vector type of 4041 /// the specified element type and size. VectorType must be a built-in type. 4042 QualType 4043 ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const { 4044 assert(vecType->isBuiltinType() || vecType->isDependentType()); 4045 4046 // Check if we've already instantiated a vector of this type. 4047 llvm::FoldingSetNodeID ID; 4048 VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, 4049 VectorType::GenericVector); 4050 void *InsertPos = nullptr; 4051 if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos)) 4052 return QualType(VTP, 0); 4053 4054 // If the element type isn't canonical, this won't be a canonical type either, 4055 // so fill in the canonical type field. 4056 QualType Canonical; 4057 if (!vecType.isCanonical()) { 4058 Canonical = getExtVectorType(getCanonicalType(vecType), NumElts); 4059 4060 // Get the new insert position for the node we care about. 
4061 VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4062 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4063 } 4064 auto *New = new (*this, TypeAlignment) 4065 ExtVectorType(vecType, NumElts, Canonical); 4066 VectorTypes.InsertNode(New, InsertPos); 4067 Types.push_back(New); 4068 return QualType(New, 0); 4069 } 4070 4071 QualType 4072 ASTContext::getDependentSizedExtVectorType(QualType vecType, 4073 Expr *SizeExpr, 4074 SourceLocation AttrLoc) const { 4075 llvm::FoldingSetNodeID ID; 4076 DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType), 4077 SizeExpr); 4078 4079 void *InsertPos = nullptr; 4080 DependentSizedExtVectorType *Canon 4081 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4082 DependentSizedExtVectorType *New; 4083 if (Canon) { 4084 // We already have a canonical version of this array type; use it as 4085 // the canonical type for a newly-built type. 4086 New = new (*this, TypeAlignment) 4087 DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0), 4088 SizeExpr, AttrLoc); 4089 } else { 4090 QualType CanonVecTy = getCanonicalType(vecType); 4091 if (CanonVecTy == vecType) { 4092 New = new (*this, TypeAlignment) 4093 DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr, 4094 AttrLoc); 4095 4096 DependentSizedExtVectorType *CanonCheck 4097 = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos); 4098 assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken"); 4099 (void)CanonCheck; 4100 DependentSizedExtVectorTypes.InsertNode(New, InsertPos); 4101 } else { 4102 QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr, 4103 SourceLocation()); 4104 New = new (*this, TypeAlignment) DependentSizedExtVectorType( 4105 *this, vecType, CanonExtTy, SizeExpr, AttrLoc); 4106 } 4107 } 4108 4109 Types.push_back(New); 4110 return QualType(New, 0); 4111 } 4112 4113 QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows, 4114 unsigned NumColumns) const { 4115 llvm::FoldingSetNodeID ID; 4116 ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns, 4117 Type::ConstantMatrix); 4118 4119 assert(MatrixType::isValidElementType(ElementTy) && 4120 "need a valid element type"); 4121 assert(ConstantMatrixType::isDimensionValid(NumRows) && 4122 ConstantMatrixType::isDimensionValid(NumColumns) && 4123 "need valid matrix dimensions"); 4124 void *InsertPos = nullptr; 4125 if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos)) 4126 return QualType(MTP, 0); 4127 4128 QualType Canonical; 4129 if (!ElementTy.isCanonical()) { 4130 Canonical = 4131 getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns); 4132 4133 ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4134 assert(!NewIP && "Matrix type shouldn't already exist in the map"); 4135 (void)NewIP; 4136 } 4137 4138 auto *New = new (*this, TypeAlignment) 4139 ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical); 4140 MatrixTypes.InsertNode(New, InsertPos); 4141 Types.push_back(New); 4142 return QualType(New, 0); 4143 } 4144 4145 QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy, 4146 Expr *RowExpr, 4147 Expr *ColumnExpr, 4148 SourceLocation AttrLoc) const { 4149 QualType CanonElementTy = getCanonicalType(ElementTy); 4150 llvm::FoldingSetNodeID ID; 4151 DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr, 4152 ColumnExpr); 4153 4154 void *InsertPos = nullptr; 4155 DependentSizedMatrixType 
*Canon = 4156 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4157 4158 if (!Canon) { 4159 Canon = new (*this, TypeAlignment) DependentSizedMatrixType( 4160 *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc); 4161 #ifndef NDEBUG 4162 DependentSizedMatrixType *CanonCheck = 4163 DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos); 4164 assert(!CanonCheck && "Dependent-sized matrix canonical type broken"); 4165 #endif 4166 DependentSizedMatrixTypes.InsertNode(Canon, InsertPos); 4167 Types.push_back(Canon); 4168 } 4169 4170 // Already have a canonical version of the matrix type 4171 // 4172 // If it exactly matches the requested type, use it directly. 4173 if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr && 4174 Canon->getColumnExpr() == ColumnExpr) 4175 return QualType(Canon, 0); 4176 4177 // Use Canon as the canonical type for newly-built type. 4178 DependentSizedMatrixType *New = new (*this, TypeAlignment) 4179 DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr, 4180 ColumnExpr, AttrLoc); 4181 Types.push_back(New); 4182 return QualType(New, 0); 4183 } 4184 4185 QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType, 4186 Expr *AddrSpaceExpr, 4187 SourceLocation AttrLoc) const { 4188 assert(AddrSpaceExpr->isInstantiationDependent()); 4189 4190 QualType canonPointeeType = getCanonicalType(PointeeType); 4191 4192 void *insertPos = nullptr; 4193 llvm::FoldingSetNodeID ID; 4194 DependentAddressSpaceType::Profile(ID, *this, canonPointeeType, 4195 AddrSpaceExpr); 4196 4197 DependentAddressSpaceType *canonTy = 4198 DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos); 4199 4200 if (!canonTy) { 4201 canonTy = new (*this, TypeAlignment) 4202 DependentAddressSpaceType(*this, canonPointeeType, 4203 QualType(), AddrSpaceExpr, AttrLoc); 4204 DependentAddressSpaceTypes.InsertNode(canonTy, insertPos); 4205 Types.push_back(canonTy); 4206 } 4207 4208 if (canonPointeeType == PointeeType && 4209 canonTy->getAddrSpaceExpr() == AddrSpaceExpr) 4210 return QualType(canonTy, 0); 4211 4212 auto *sugaredType 4213 = new (*this, TypeAlignment) 4214 DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0), 4215 AddrSpaceExpr, AttrLoc); 4216 Types.push_back(sugaredType); 4217 return QualType(sugaredType, 0); 4218 } 4219 4220 /// Determine whether \p T is canonical as the result type of a function. 4221 static bool isCanonicalResultType(QualType T) { 4222 return T.isCanonical() && 4223 (T.getObjCLifetime() == Qualifiers::OCL_None || 4224 T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone); 4225 } 4226 4227 /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'. 4228 QualType 4229 ASTContext::getFunctionNoProtoType(QualType ResultTy, 4230 const FunctionType::ExtInfo &Info) const { 4231 // Unique functions, to guarantee there is only one function of a particular 4232 // structure. 4233 llvm::FoldingSetNodeID ID; 4234 FunctionNoProtoType::Profile(ID, ResultTy, Info); 4235 4236 void *InsertPos = nullptr; 4237 if (FunctionNoProtoType *FT = 4238 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) 4239 return QualType(FT, 0); 4240 4241 QualType Canonical; 4242 if (!isCanonicalResultType(ResultTy)) { 4243 Canonical = 4244 getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info); 4245 4246 // Get the new insert position for the node we care about.
4247 FunctionNoProtoType *NewIP = 4248 FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4249 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4250 } 4251 4252 auto *New = new (*this, TypeAlignment) 4253 FunctionNoProtoType(ResultTy, Canonical, Info); 4254 Types.push_back(New); 4255 FunctionNoProtoTypes.InsertNode(New, InsertPos); 4256 return QualType(New, 0); 4257 } 4258 4259 CanQualType 4260 ASTContext::getCanonicalFunctionResultType(QualType ResultType) const { 4261 CanQualType CanResultType = getCanonicalType(ResultType); 4262 4263 // Canonical result types do not have ARC lifetime qualifiers. 4264 if (CanResultType.getQualifiers().hasObjCLifetime()) { 4265 Qualifiers Qs = CanResultType.getQualifiers(); 4266 Qs.removeObjCLifetime(); 4267 return CanQualType::CreateUnsafe( 4268 getQualifiedType(CanResultType.getUnqualifiedType(), Qs)); 4269 } 4270 4271 return CanResultType; 4272 } 4273 4274 static bool isCanonicalExceptionSpecification( 4275 const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) { 4276 if (ESI.Type == EST_None) 4277 return true; 4278 if (!NoexceptInType) 4279 return false; 4280 4281 // C++17 onwards: exception specification is part of the type, as a simple 4282 // boolean "can this function type throw". 4283 if (ESI.Type == EST_BasicNoexcept) 4284 return true; 4285 4286 // A noexcept(expr) specification is (possibly) canonical if expr is 4287 // value-dependent. 4288 if (ESI.Type == EST_DependentNoexcept) 4289 return true; 4290 4291 // A dynamic exception specification is canonical if it only contains pack 4292 // expansions (so we can't tell whether it's non-throwing) and all its 4293 // contained types are canonical. 4294 if (ESI.Type == EST_Dynamic) { 4295 bool AnyPackExpansions = false; 4296 for (QualType ET : ESI.Exceptions) { 4297 if (!ET.isCanonical()) 4298 return false; 4299 if (ET->getAs<PackExpansionType>()) 4300 AnyPackExpansions = true; 4301 } 4302 return AnyPackExpansions; 4303 } 4304 4305 return false; 4306 } 4307 4308 QualType ASTContext::getFunctionTypeInternal( 4309 QualType ResultTy, ArrayRef<QualType> ArgArray, 4310 const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const { 4311 size_t NumArgs = ArgArray.size(); 4312 4313 // Unique functions, to guarantee there is only one function of a particular 4314 // structure. 4315 llvm::FoldingSetNodeID ID; 4316 FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI, 4317 *this, true); 4318 4319 QualType Canonical; 4320 bool Unique = false; 4321 4322 void *InsertPos = nullptr; 4323 if (FunctionProtoType *FPT = 4324 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) { 4325 QualType Existing = QualType(FPT, 0); 4326 4327 // If we find a pre-existing equivalent FunctionProtoType, we can just reuse 4328 // it so long as our exception specification doesn't contain a dependent 4329 // noexcept expression, or we're just looking for a canonical type. 4330 // Otherwise, we're going to need to create a type 4331 // sugar node to hold the concrete expression. 4332 if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) || 4333 EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr()) 4334 return Existing; 4335 4336 // We need a new type sugar node for this one, to hold the new noexcept 4337 // expression. We do no canonicalization here, but that's OK since we don't 4338 // expect to see the same noexcept expression much more than once. 
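// Borrow the canonical type from the existing node; 'Unique' also tells the code below not to insert this sugar-only node into the folding set.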
4339 Canonical = getCanonicalType(Existing); 4340 Unique = true; 4341 } 4342 4343 bool NoexceptInType = getLangOpts().CPlusPlus17; 4344 bool IsCanonicalExceptionSpec = 4345 isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType); 4346 4347 // Determine whether the type being created is already canonical or not. 4348 bool isCanonical = !Unique && IsCanonicalExceptionSpec && 4349 isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn; 4350 for (unsigned i = 0; i != NumArgs && isCanonical; ++i) 4351 if (!ArgArray[i].isCanonicalAsParam()) 4352 isCanonical = false; 4353 4354 if (OnlyWantCanonical) 4355 assert(isCanonical && 4356 "given non-canonical parameters constructing canonical type"); 4357 4358 // If this type isn't canonical, get the canonical version of it if we don't 4359 // already have it. The exception spec is only partially part of the 4360 // canonical type, and only in C++17 onwards. 4361 if (!isCanonical && Canonical.isNull()) { 4362 SmallVector<QualType, 16> CanonicalArgs; 4363 CanonicalArgs.reserve(NumArgs); 4364 for (unsigned i = 0; i != NumArgs; ++i) 4365 CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i])); 4366 4367 llvm::SmallVector<QualType, 8> ExceptionTypeStorage; 4368 FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI; 4369 CanonicalEPI.HasTrailingReturn = false; 4370 4371 if (IsCanonicalExceptionSpec) { 4372 // Exception spec is already OK. 4373 } else if (NoexceptInType) { 4374 switch (EPI.ExceptionSpec.Type) { 4375 case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated: 4376 // We don't know yet. It shouldn't matter what we pick here; no-one 4377 // should ever look at this. 4378 LLVM_FALLTHROUGH; 4379 case EST_None: case EST_MSAny: case EST_NoexceptFalse: 4380 CanonicalEPI.ExceptionSpec.Type = EST_None; 4381 break; 4382 4383 // A dynamic exception specification is almost always "not noexcept", 4384 // with the exception that a pack expansion might expand to no types. 4385 case EST_Dynamic: { 4386 bool AnyPacks = false; 4387 for (QualType ET : EPI.ExceptionSpec.Exceptions) { 4388 if (ET->getAs<PackExpansionType>()) 4389 AnyPacks = true; 4390 ExceptionTypeStorage.push_back(getCanonicalType(ET)); 4391 } 4392 if (!AnyPacks) 4393 CanonicalEPI.ExceptionSpec.Type = EST_None; 4394 else { 4395 CanonicalEPI.ExceptionSpec.Type = EST_Dynamic; 4396 CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage; 4397 } 4398 break; 4399 } 4400 4401 case EST_DynamicNone: 4402 case EST_BasicNoexcept: 4403 case EST_NoexceptTrue: 4404 case EST_NoThrow: 4405 CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept; 4406 break; 4407 4408 case EST_DependentNoexcept: 4409 llvm_unreachable("dependent noexcept is already canonical"); 4410 } 4411 } else { 4412 CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo(); 4413 } 4414 4415 // Adjust the canonical function result type. 4416 CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy); 4417 Canonical = 4418 getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true); 4419 4420 // Get the new insert position for the node we care about. 4421 FunctionProtoType *NewIP = 4422 FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos); 4423 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 4424 } 4425 4426 // Compute the needed size to hold this FunctionProtoType and the 4427 // various trailing objects. 
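// Parameter types, exception types, the noexcept expression, and any per-parameter info are tail-allocated directly after the FunctionProtoType object.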
4428 auto ESH = FunctionProtoType::getExceptionSpecSize( 4429 EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size()); 4430 size_t Size = FunctionProtoType::totalSizeToAlloc< 4431 QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields, 4432 FunctionType::ExceptionType, Expr *, FunctionDecl *, 4433 FunctionProtoType::ExtParameterInfo, Qualifiers>( 4434 NumArgs, EPI.Variadic, 4435 FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type), 4436 ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr, 4437 EPI.ExtParameterInfos ? NumArgs : 0, 4438 EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0); 4439 4440 auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment); 4441 FunctionProtoType::ExtProtoInfo newEPI = EPI; 4442 new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI); 4443 Types.push_back(FTP); 4444 if (!Unique) 4445 FunctionProtoTypes.InsertNode(FTP, InsertPos); 4446 return QualType(FTP, 0); 4447 } 4448 4449 QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const { 4450 llvm::FoldingSetNodeID ID; 4451 PipeType::Profile(ID, T, ReadOnly); 4452 4453 void *InsertPos = nullptr; 4454 if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos)) 4455 return QualType(PT, 0); 4456 4457 // If the pipe element type isn't canonical, this won't be a canonical type 4458 // either, so fill in the canonical type field. 4459 QualType Canonical; 4460 if (!T.isCanonical()) { 4461 Canonical = getPipeType(getCanonicalType(T), ReadOnly); 4462 4463 // Get the new insert position for the node we care about. 4464 PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos); 4465 assert(!NewIP && "Shouldn't be in the map!"); 4466 (void)NewIP; 4467 } 4468 auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly); 4469 Types.push_back(New); 4470 PipeTypes.InsertNode(New, InsertPos); 4471 return QualType(New, 0); 4472 } 4473 4474 QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const { 4475 // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. 4476 return LangOpts.OpenCL ? 
getAddrSpaceQualType(Ty, LangAS::opencl_constant) 4477 : Ty; 4478 } 4479 4480 QualType ASTContext::getReadPipeType(QualType T) const { 4481 return getPipeType(T, true); 4482 } 4483 4484 QualType ASTContext::getWritePipeType(QualType T) const { 4485 return getPipeType(T, false); 4486 } 4487 4488 QualType ASTContext::getBitIntType(bool IsUnsigned, unsigned NumBits) const { 4489 llvm::FoldingSetNodeID ID; 4490 BitIntType::Profile(ID, IsUnsigned, NumBits); 4491 4492 void *InsertPos = nullptr; 4493 if (BitIntType *EIT = BitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4494 return QualType(EIT, 0); 4495 4496 auto *New = new (*this, TypeAlignment) BitIntType(IsUnsigned, NumBits); 4497 BitIntTypes.InsertNode(New, InsertPos); 4498 Types.push_back(New); 4499 return QualType(New, 0); 4500 } 4501 4502 QualType ASTContext::getDependentBitIntType(bool IsUnsigned, 4503 Expr *NumBitsExpr) const { 4504 assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent"); 4505 llvm::FoldingSetNodeID ID; 4506 DependentBitIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr); 4507 4508 void *InsertPos = nullptr; 4509 if (DependentBitIntType *Existing = 4510 DependentBitIntTypes.FindNodeOrInsertPos(ID, InsertPos)) 4511 return QualType(Existing, 0); 4512 4513 auto *New = new (*this, TypeAlignment) 4514 DependentBitIntType(*this, IsUnsigned, NumBitsExpr); 4515 DependentBitIntTypes.InsertNode(New, InsertPos); 4516 4517 Types.push_back(New); 4518 return QualType(New, 0); 4519 } 4520 4521 #ifndef NDEBUG 4522 static bool NeedsInjectedClassNameType(const RecordDecl *D) { 4523 if (!isa<CXXRecordDecl>(D)) return false; 4524 const auto *RD = cast<CXXRecordDecl>(D); 4525 if (isa<ClassTemplatePartialSpecializationDecl>(RD)) 4526 return true; 4527 if (RD->getDescribedClassTemplate() && 4528 !isa<ClassTemplateSpecializationDecl>(RD)) 4529 return true; 4530 return false; 4531 } 4532 #endif 4533 4534 /// getInjectedClassNameType - Return the unique reference to the 4535 /// injected class name type for the specified templated declaration. 4536 QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl, 4537 QualType TST) const { 4538 assert(NeedsInjectedClassNameType(Decl)); 4539 if (Decl->TypeForDecl) { 4540 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4541 } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) { 4542 assert(PrevDecl->TypeForDecl && "previous declaration has no type"); 4543 Decl->TypeForDecl = PrevDecl->TypeForDecl; 4544 assert(isa<InjectedClassNameType>(Decl->TypeForDecl)); 4545 } else { 4546 Type *newType = 4547 new (*this, TypeAlignment) InjectedClassNameType(Decl, TST); 4548 Decl->TypeForDecl = newType; 4549 Types.push_back(newType); 4550 } 4551 return QualType(Decl->TypeForDecl, 0); 4552 } 4553 4554 /// getTypeDeclType - Return the unique reference to the type for the 4555 /// specified type declaration. 
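/// This slow path is only taken when the declaration has no cached TypeForDecl.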
4556 QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const { 4557 assert(Decl && "Passed null for Decl param"); 4558 assert(!Decl->TypeForDecl && "TypeForDecl present in slow case"); 4559 4560 if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl)) 4561 return getTypedefType(Typedef); 4562 4563 assert(!isa<TemplateTypeParmDecl>(Decl) && 4564 "Template type parameter types are always available."); 4565 4566 if (const auto *Record = dyn_cast<RecordDecl>(Decl)) { 4567 assert(Record->isFirstDecl() && "struct/union has previous declaration"); 4568 assert(!NeedsInjectedClassNameType(Record)); 4569 return getRecordType(Record); 4570 } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) { 4571 assert(Enum->isFirstDecl() && "enum has previous declaration"); 4572 return getEnumType(Enum); 4573 } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) { 4574 return getUnresolvedUsingType(Using); 4575 } else 4576 llvm_unreachable("TypeDecl without a type?"); 4577 4578 return QualType(Decl->TypeForDecl, 0); 4579 } 4580 4581 /// getTypedefType - Return the unique reference to the type for the 4582 /// specified typedef name decl. 4583 QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl, 4584 QualType Underlying) const { 4585 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4586 4587 if (Underlying.isNull()) 4588 Underlying = Decl->getUnderlyingType(); 4589 QualType Canonical = getCanonicalType(Underlying); 4590 auto *newType = new (*this, TypeAlignment) 4591 TypedefType(Type::Typedef, Decl, Underlying, Canonical); 4592 Decl->TypeForDecl = newType; 4593 Types.push_back(newType); 4594 return QualType(newType, 0); 4595 } 4596 4597 QualType ASTContext::getUsingType(const UsingShadowDecl *Found, 4598 QualType Underlying) const { 4599 llvm::FoldingSetNodeID ID; 4600 UsingType::Profile(ID, Found); 4601 4602 void *InsertPos = nullptr; 4603 UsingType *T = UsingTypes.FindNodeOrInsertPos(ID, InsertPos); 4604 if (T) 4605 return QualType(T, 0); 4606 4607 assert(!Underlying.hasLocalQualifiers()); 4608 assert(Underlying == getTypeDeclType(cast<TypeDecl>(Found->getTargetDecl()))); 4609 QualType Canon = Underlying.getCanonicalType(); 4610 4611 UsingType *NewType = 4612 new (*this, TypeAlignment) UsingType(Found, Underlying, Canon); 4613 Types.push_back(NewType); 4614 UsingTypes.InsertNode(NewType, InsertPos); 4615 return QualType(NewType, 0); 4616 } 4617 4618 QualType ASTContext::getRecordType(const RecordDecl *Decl) const { 4619 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4620 4621 if (const RecordDecl *PrevDecl = Decl->getPreviousDecl()) 4622 if (PrevDecl->TypeForDecl) 4623 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4624 4625 auto *newType = new (*this, TypeAlignment) RecordType(Decl); 4626 Decl->TypeForDecl = newType; 4627 Types.push_back(newType); 4628 return QualType(newType, 0); 4629 } 4630 4631 QualType ASTContext::getEnumType(const EnumDecl *Decl) const { 4632 if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0); 4633 4634 if (const EnumDecl *PrevDecl = Decl->getPreviousDecl()) 4635 if (PrevDecl->TypeForDecl) 4636 return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0); 4637 4638 auto *newType = new (*this, TypeAlignment) EnumType(Decl); 4639 Decl->TypeForDecl = newType; 4640 Types.push_back(newType); 4641 return QualType(newType, 0); 4642 } 4643 4644 QualType ASTContext::getUnresolvedUsingType( 4645 const UnresolvedUsingTypenameDecl *Decl) const { 4646 if (Decl->TypeForDecl) 4647 return 
QualType(Decl->TypeForDecl, 0); 4648 4649 if (const UnresolvedUsingTypenameDecl *CanonicalDecl = 4650 Decl->getCanonicalDecl()) 4651 if (CanonicalDecl->TypeForDecl) 4652 return QualType(Decl->TypeForDecl = CanonicalDecl->TypeForDecl, 0); 4653 4654 Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Decl); 4655 Decl->TypeForDecl = newType; 4656 Types.push_back(newType); 4657 return QualType(newType, 0); 4658 } 4659 4660 QualType ASTContext::getAttributedType(attr::Kind attrKind, 4661 QualType modifiedType, 4662 QualType equivalentType) { 4663 llvm::FoldingSetNodeID id; 4664 AttributedType::Profile(id, attrKind, modifiedType, equivalentType); 4665 4666 void *insertPos = nullptr; 4667 AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos); 4668 if (type) return QualType(type, 0); 4669 4670 QualType canon = getCanonicalType(equivalentType); 4671 type = new (*this, TypeAlignment) 4672 AttributedType(canon, attrKind, modifiedType, equivalentType); 4673 4674 Types.push_back(type); 4675 AttributedTypes.InsertNode(type, insertPos); 4676 4677 return QualType(type, 0); 4678 } 4679 4680 /// Retrieve a substitution-result type. 4681 QualType 4682 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm, 4683 QualType Replacement) const { 4684 assert(Replacement.isCanonical() 4685 && "replacement types must always be canonical"); 4686 4687 llvm::FoldingSetNodeID ID; 4688 SubstTemplateTypeParmType::Profile(ID, Parm, Replacement); 4689 void *InsertPos = nullptr; 4690 SubstTemplateTypeParmType *SubstParm 4691 = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos); 4692 4693 if (!SubstParm) { 4694 SubstParm = new (*this, TypeAlignment) 4695 SubstTemplateTypeParmType(Parm, Replacement); 4696 Types.push_back(SubstParm); 4697 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos); 4698 } 4699 4700 return QualType(SubstParm, 0); 4701 } 4702 4703 /// Retrieve a 4704 QualType ASTContext::getSubstTemplateTypeParmPackType( 4705 const TemplateTypeParmType *Parm, 4706 const TemplateArgument &ArgPack) { 4707 #ifndef NDEBUG 4708 for (const auto &P : ArgPack.pack_elements()) { 4709 assert(P.getKind() == TemplateArgument::Type &&"Pack contains a non-type"); 4710 assert(P.getAsType().isCanonical() && "Pack contains non-canonical type"); 4711 } 4712 #endif 4713 4714 llvm::FoldingSetNodeID ID; 4715 SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack); 4716 void *InsertPos = nullptr; 4717 if (SubstTemplateTypeParmPackType *SubstParm 4718 = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos)) 4719 return QualType(SubstParm, 0); 4720 4721 QualType Canon; 4722 if (!Parm->isCanonicalUnqualified()) { 4723 Canon = getCanonicalType(QualType(Parm, 0)); 4724 Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon), 4725 ArgPack); 4726 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos); 4727 } 4728 4729 auto *SubstParm 4730 = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon, 4731 ArgPack); 4732 Types.push_back(SubstParm); 4733 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos); 4734 return QualType(SubstParm, 0); 4735 } 4736 4737 /// Retrieve the template type parameter type for a template 4738 /// parameter or parameter pack with the given depth, index, and (optionally) 4739 /// name. 
4740 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4741                                              bool ParameterPack,
4742                                              TemplateTypeParmDecl *TTPDecl) const {
4743   llvm::FoldingSetNodeID ID;
4744   TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4745   void *InsertPos = nullptr;
4746   TemplateTypeParmType *TypeParm
4747     = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4748
4749   if (TypeParm)
4750     return QualType(TypeParm, 0);
4751
4752   if (TTPDecl) {
4753     QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4754     TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4755
4756     TemplateTypeParmType *TypeCheck
4757       = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4758     assert(!TypeCheck && "Template type parameter canonical type broken");
4759     (void)TypeCheck;
4760   } else
4761     TypeParm = new (*this, TypeAlignment)
4762         TemplateTypeParmType(Depth, Index, ParameterPack);
4763
4764   Types.push_back(TypeParm);
4765   TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4766
4767   return QualType(TypeParm, 0);
4768 }
4769
4770 TypeSourceInfo *
4771 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4772                                               SourceLocation NameLoc,
4773                                               const TemplateArgumentListInfo &Args,
4774                                               QualType Underlying) const {
4775   assert(!Name.getAsDependentTemplateName() &&
4776          "No dependent template names here!");
4777   QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
4778
4779   TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
4780   TemplateSpecializationTypeLoc TL =
4781       DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4782   TL.setTemplateKeywordLoc(SourceLocation());
4783   TL.setTemplateNameLoc(NameLoc);
4784   TL.setLAngleLoc(Args.getLAngleLoc());
4785   TL.setRAngleLoc(Args.getRAngleLoc());
4786   for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4787     TL.setArgLocInfo(i, Args[i].getLocInfo());
4788   return DI;
4789 }
4790
4791 QualType
4792 ASTContext::getTemplateSpecializationType(TemplateName Template,
4793                                           const TemplateArgumentListInfo &Args,
4794                                           QualType Underlying) const {
4795   assert(!Template.getAsDependentTemplateName() &&
4796          "No dependent template names here!");
4797
4798   SmallVector<TemplateArgument, 4> ArgVec;
4799   ArgVec.reserve(Args.size());
4800   for (const TemplateArgumentLoc &Arg : Args.arguments())
4801     ArgVec.push_back(Arg.getArgument());
4802
4803   return getTemplateSpecializationType(Template, ArgVec, Underlying);
4804 }
4805
4806 #ifndef NDEBUG
4807 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4808   for (const TemplateArgument &Arg : Args)
4809     if (Arg.isPackExpansion())
4810       return true;
4811
4812   return false;
4813 }
4814 #endif
4815
4816 QualType
4817 ASTContext::getTemplateSpecializationType(TemplateName Template,
4818                                           ArrayRef<TemplateArgument> Args,
4819                                           QualType Underlying) const {
4820   assert(!Template.getAsDependentTemplateName() &&
4821          "No dependent template names here!");
4822   // Look through qualified template names.
4823   if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4824     Template = TemplateName(QTN->getTemplateDecl());
4825
4826   bool IsTypeAlias =
4827       Template.getAsTemplateDecl() &&
4828       isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
4829   QualType CanonType;
4830   if (!Underlying.isNull())
4831     CanonType = getCanonicalType(Underlying);
4832   else {
4833     // We can get here with an alias template when the specialization contains
4834     // a pack expansion that does not match up with a parameter pack.
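    // For example (illustrative): given 'template<class T, class U> using Pair =
    // X<T, U>;', a use such as 'Pair<Ts...>' inside another template cannot be
    // desugared until 'Ts' is expanded, because the pack expansion does not line
    // up with a parameter pack of the alias; the caller passes a null Underlying
    // in that case, which the assert below allows.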
4835 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4836 "Caller must compute aliased type"); 4837 IsTypeAlias = false; 4838 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4839 } 4840 4841 // Allocate the (non-canonical) template specialization type, but don't 4842 // try to unique it: these types typically have location information that 4843 // we don't unique and don't want to lose. 4844 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4845 sizeof(TemplateArgument) * Args.size() + 4846 (IsTypeAlias? sizeof(QualType) : 0), 4847 TypeAlignment); 4848 auto *Spec 4849 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4850 IsTypeAlias ? Underlying : QualType()); 4851 4852 Types.push_back(Spec); 4853 return QualType(Spec, 0); 4854 } 4855 4856 static bool 4857 getCanonicalTemplateArguments(const ASTContext &C, 4858 ArrayRef<TemplateArgument> OrigArgs, 4859 SmallVectorImpl<TemplateArgument> &CanonArgs) { 4860 bool AnyNonCanonArgs = false; 4861 unsigned NumArgs = OrigArgs.size(); 4862 CanonArgs.resize(NumArgs); 4863 for (unsigned I = 0; I != NumArgs; ++I) { 4864 const TemplateArgument &OrigArg = OrigArgs[I]; 4865 TemplateArgument &CanonArg = CanonArgs[I]; 4866 CanonArg = C.getCanonicalTemplateArgument(OrigArg); 4867 if (!CanonArg.structurallyEquals(OrigArg)) 4868 AnyNonCanonArgs = true; 4869 } 4870 return AnyNonCanonArgs; 4871 } 4872 4873 QualType ASTContext::getCanonicalTemplateSpecializationType( 4874 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4875 assert(!Template.getAsDependentTemplateName() && 4876 "No dependent template names here!"); 4877 4878 // Look through qualified template names. 4879 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4880 Template = TemplateName(QTN->getTemplateDecl()); 4881 4882 // Build the canonical template specialization type. 4883 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4884 SmallVector<TemplateArgument, 4> CanonArgs; 4885 ::getCanonicalTemplateArguments(*this, Args, CanonArgs); 4886 4887 // Determine whether this canonical template specialization type already 4888 // exists. 4889 llvm::FoldingSetNodeID ID; 4890 TemplateSpecializationType::Profile(ID, CanonTemplate, 4891 CanonArgs, *this); 4892 4893 void *InsertPos = nullptr; 4894 TemplateSpecializationType *Spec 4895 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4896 4897 if (!Spec) { 4898 // Allocate a new canonical template specialization type. 
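    // Only a dependent template-id should get this far; a non-dependent one,
    // e.g. 'std::vector<int>' (illustrative), receives the canonical type of its
    // instantiated specialization through the Underlying path instead, which is
    // what the assert after this block enforces. The node is allocated with
    // trailing storage for its canonical arguments.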
4899 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4900 sizeof(TemplateArgument) * CanonArgs.size()), 4901 TypeAlignment); 4902 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 4903 CanonArgs, 4904 QualType(), QualType()); 4905 Types.push_back(Spec); 4906 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 4907 } 4908 4909 assert(Spec->isDependentType() && 4910 "Non-dependent template-id type must have a canonical type"); 4911 return QualType(Spec, 0); 4912 } 4913 4914 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 4915 NestedNameSpecifier *NNS, 4916 QualType NamedType, 4917 TagDecl *OwnedTagDecl) const { 4918 llvm::FoldingSetNodeID ID; 4919 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 4920 4921 void *InsertPos = nullptr; 4922 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4923 if (T) 4924 return QualType(T, 0); 4925 4926 QualType Canon = NamedType; 4927 if (!Canon.isCanonical()) { 4928 Canon = getCanonicalType(NamedType); 4929 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4930 assert(!CheckT && "Elaborated canonical type broken"); 4931 (void)CheckT; 4932 } 4933 4934 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 4935 TypeAlignment); 4936 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 4937 4938 Types.push_back(T); 4939 ElaboratedTypes.InsertNode(T, InsertPos); 4940 return QualType(T, 0); 4941 } 4942 4943 QualType 4944 ASTContext::getParenType(QualType InnerType) const { 4945 llvm::FoldingSetNodeID ID; 4946 ParenType::Profile(ID, InnerType); 4947 4948 void *InsertPos = nullptr; 4949 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 4950 if (T) 4951 return QualType(T, 0); 4952 4953 QualType Canon = InnerType; 4954 if (!Canon.isCanonical()) { 4955 Canon = getCanonicalType(InnerType); 4956 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 4957 assert(!CheckT && "Paren canonical type broken"); 4958 (void)CheckT; 4959 } 4960 4961 T = new (*this, TypeAlignment) ParenType(InnerType, Canon); 4962 Types.push_back(T); 4963 ParenTypes.InsertNode(T, InsertPos); 4964 return QualType(T, 0); 4965 } 4966 4967 QualType 4968 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 4969 const IdentifierInfo *MacroII) const { 4970 QualType Canon = UnderlyingTy; 4971 if (!Canon.isCanonical()) 4972 Canon = getCanonicalType(UnderlyingTy); 4973 4974 auto *newType = new (*this, TypeAlignment) 4975 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 4976 Types.push_back(newType); 4977 return QualType(newType, 0); 4978 } 4979 4980 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 4981 NestedNameSpecifier *NNS, 4982 const IdentifierInfo *Name, 4983 QualType Canon) const { 4984 if (Canon.isNull()) { 4985 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 4986 if (CanonNNS != NNS) 4987 Canon = getDependentNameType(Keyword, CanonNNS, Name); 4988 } 4989 4990 llvm::FoldingSetNodeID ID; 4991 DependentNameType::Profile(ID, Keyword, NNS, Name); 4992 4993 void *InsertPos = nullptr; 4994 DependentNameType *T 4995 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 4996 if (T) 4997 return QualType(T, 0); 4998 4999 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); 5000 Types.push_back(T); 5001 DependentNameTypes.InsertNode(T, InsertPos); 5002 return QualType(T, 0); 5003 } 5004 5005 QualType 5006 
ASTContext::getDependentTemplateSpecializationType( 5007 ElaboratedTypeKeyword Keyword, 5008 NestedNameSpecifier *NNS, 5009 const IdentifierInfo *Name, 5010 const TemplateArgumentListInfo &Args) const { 5011 // TODO: avoid this copy 5012 SmallVector<TemplateArgument, 16> ArgCopy; 5013 for (unsigned I = 0, E = Args.size(); I != E; ++I) 5014 ArgCopy.push_back(Args[I].getArgument()); 5015 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 5016 } 5017 5018 QualType 5019 ASTContext::getDependentTemplateSpecializationType( 5020 ElaboratedTypeKeyword Keyword, 5021 NestedNameSpecifier *NNS, 5022 const IdentifierInfo *Name, 5023 ArrayRef<TemplateArgument> Args) const { 5024 assert((!NNS || NNS->isDependent()) && 5025 "nested-name-specifier must be dependent"); 5026 5027 llvm::FoldingSetNodeID ID; 5028 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 5029 Name, Args); 5030 5031 void *InsertPos = nullptr; 5032 DependentTemplateSpecializationType *T 5033 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5034 if (T) 5035 return QualType(T, 0); 5036 5037 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5038 5039 ElaboratedTypeKeyword CanonKeyword = Keyword; 5040 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 5041 5042 SmallVector<TemplateArgument, 16> CanonArgs; 5043 bool AnyNonCanonArgs = 5044 ::getCanonicalTemplateArguments(*this, Args, CanonArgs); 5045 5046 QualType Canon; 5047 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5048 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5049 Name, 5050 CanonArgs); 5051 5052 // Find the insert position again. 5053 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5054 } 5055 5056 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5057 sizeof(TemplateArgument) * Args.size()), 5058 TypeAlignment); 5059 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5060 Name, Args, Canon); 5061 Types.push_back(T); 5062 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5063 return QualType(T, 0); 5064 } 5065 5066 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5067 TemplateArgument Arg; 5068 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5069 QualType ArgType = getTypeDeclType(TTP); 5070 if (TTP->isParameterPack()) 5071 ArgType = getPackExpansionType(ArgType, None); 5072 5073 Arg = TemplateArgument(ArgType); 5074 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5075 QualType T = 5076 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5077 // For class NTTPs, ensure we include the 'const' so the type matches that 5078 // of a real template argument. 5079 // FIXME: It would be more faithful to model this as something like an 5080 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
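    // For instance (illustrative): for a class-type NTTP such as
    // 'template<Widget W> struct S;', the DeclRefExpr built below is given the
    // type 'const Widget', matching the type of a real template argument of
    // class type.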
5081 if (T->isRecordType()) 5082 T.addConst(); 5083 Expr *E = new (*this) DeclRefExpr( 5084 *this, NTTP, /*enclosing*/ false, T, 5085 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5086 5087 if (NTTP->isParameterPack()) 5088 E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(), 5089 None); 5090 Arg = TemplateArgument(E); 5091 } else { 5092 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5093 if (TTP->isParameterPack()) 5094 Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>()); 5095 else 5096 Arg = TemplateArgument(TemplateName(TTP)); 5097 } 5098 5099 if (Param->isTemplateParameterPack()) 5100 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5101 5102 return Arg; 5103 } 5104 5105 void 5106 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5107 SmallVectorImpl<TemplateArgument> &Args) { 5108 Args.reserve(Args.size() + Params->size()); 5109 5110 for (NamedDecl *Param : *Params) 5111 Args.push_back(getInjectedTemplateArg(Param)); 5112 } 5113 5114 QualType ASTContext::getPackExpansionType(QualType Pattern, 5115 Optional<unsigned> NumExpansions, 5116 bool ExpectPackInType) { 5117 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5118 "Pack expansions must expand one or more parameter packs"); 5119 5120 llvm::FoldingSetNodeID ID; 5121 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5122 5123 void *InsertPos = nullptr; 5124 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5125 if (T) 5126 return QualType(T, 0); 5127 5128 QualType Canon; 5129 if (!Pattern.isCanonical()) { 5130 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5131 /*ExpectPackInType=*/false); 5132 5133 // Find the insert position again, in case we inserted an element into 5134 // PackExpansionTypes and invalidated our insert position. 5135 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5136 } 5137 5138 T = new (*this, TypeAlignment) 5139 PackExpansionType(Pattern, Canon, NumExpansions); 5140 Types.push_back(T); 5141 PackExpansionTypes.InsertNode(T, InsertPos); 5142 return QualType(T, 0); 5143 } 5144 5145 /// CmpProtocolNames - Comparison predicate for sorting protocols 5146 /// alphabetically. 5147 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5148 ObjCProtocolDecl *const *RHS) { 5149 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5150 } 5151 5152 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5153 if (Protocols.empty()) return true; 5154 5155 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5156 return false; 5157 5158 for (unsigned i = 1; i != Protocols.size(); ++i) 5159 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5160 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5161 return false; 5162 return true; 5163 } 5164 5165 static void 5166 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5167 // Sort protocols, keyed by name. 5168 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5169 5170 // Canonicalize. 5171 for (ObjCProtocolDecl *&P : Protocols) 5172 P = P->getCanonicalDecl(); 5173 5174 // Remove duplicates. 
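  // std::unique below only collapses adjacent duplicates, which is why the list
  // is sorted by name first; e.g. (illustrative) {NSObject, NSCopying, NSObject}
  // sorts to {NSCopying, NSObject, NSObject} and is then uniqued to
  // {NSCopying, NSObject}.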
5175 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5176 Protocols.erase(ProtocolsEnd, Protocols.end()); 5177 } 5178 5179 QualType ASTContext::getObjCObjectType(QualType BaseType, 5180 ObjCProtocolDecl * const *Protocols, 5181 unsigned NumProtocols) const { 5182 return getObjCObjectType(BaseType, {}, 5183 llvm::makeArrayRef(Protocols, NumProtocols), 5184 /*isKindOf=*/false); 5185 } 5186 5187 QualType ASTContext::getObjCObjectType( 5188 QualType baseType, 5189 ArrayRef<QualType> typeArgs, 5190 ArrayRef<ObjCProtocolDecl *> protocols, 5191 bool isKindOf) const { 5192 // If the base type is an interface and there aren't any protocols or 5193 // type arguments to add, then the interface type will do just fine. 5194 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5195 isa<ObjCInterfaceType>(baseType)) 5196 return baseType; 5197 5198 // Look in the folding set for an existing type. 5199 llvm::FoldingSetNodeID ID; 5200 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5201 void *InsertPos = nullptr; 5202 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5203 return QualType(QT, 0); 5204 5205 // Determine the type arguments to be used for canonicalization, 5206 // which may be explicitly specified here or written on the base 5207 // type. 5208 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5209 if (effectiveTypeArgs.empty()) { 5210 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5211 effectiveTypeArgs = baseObject->getTypeArgs(); 5212 } 5213 5214 // Build the canonical type, which has the canonical base type and a 5215 // sorted-and-uniqued list of protocols and the type arguments 5216 // canonicalized. 5217 QualType canonical; 5218 bool typeArgsAreCanonical = llvm::all_of( 5219 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5220 bool protocolsSorted = areSortedAndUniqued(protocols); 5221 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5222 // Determine the canonical type arguments. 5223 ArrayRef<QualType> canonTypeArgs; 5224 SmallVector<QualType, 4> canonTypeArgsVec; 5225 if (!typeArgsAreCanonical) { 5226 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5227 for (auto typeArg : effectiveTypeArgs) 5228 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5229 canonTypeArgs = canonTypeArgsVec; 5230 } else { 5231 canonTypeArgs = effectiveTypeArgs; 5232 } 5233 5234 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5235 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5236 if (!protocolsSorted) { 5237 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5238 SortAndUniqueProtocols(canonProtocolsVec); 5239 canonProtocols = canonProtocolsVec; 5240 } else { 5241 canonProtocols = protocols; 5242 } 5243 5244 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5245 canonProtocols, isKindOf); 5246 5247 // Regenerate InsertPos. 5248 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5249 } 5250 5251 unsigned size = sizeof(ObjCObjectTypeImpl); 5252 size += typeArgs.size() * sizeof(QualType); 5253 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5254 void *mem = Allocate(size, TypeAlignment); 5255 auto *T = 5256 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5257 isKindOf); 5258 5259 Types.push_back(T); 5260 ObjCObjectTypes.InsertNode(T, InsertPos); 5261 return QualType(T, 0); 5262 } 5263 5264 /// Apply Objective-C protocol qualifiers to the given type. 
5265 /// If this is for the canonical type of a type parameter, we can apply 5266 /// protocol qualifiers on the ObjCObjectPointerType. 5267 QualType 5268 ASTContext::applyObjCProtocolQualifiers(QualType type, 5269 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5270 bool allowOnPointerType) const { 5271 hasError = false; 5272 5273 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5274 return getObjCTypeParamType(objT->getDecl(), protocols); 5275 } 5276 5277 // Apply protocol qualifiers to ObjCObjectPointerType. 5278 if (allowOnPointerType) { 5279 if (const auto *objPtr = 5280 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5281 const ObjCObjectType *objT = objPtr->getObjectType(); 5282 // Merge protocol lists and construct ObjCObjectType. 5283 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5284 protocolsVec.append(objT->qual_begin(), 5285 objT->qual_end()); 5286 protocolsVec.append(protocols.begin(), protocols.end()); 5287 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5288 type = getObjCObjectType( 5289 objT->getBaseType(), 5290 objT->getTypeArgsAsWritten(), 5291 protocols, 5292 objT->isKindOfTypeAsWritten()); 5293 return getObjCObjectPointerType(type); 5294 } 5295 } 5296 5297 // Apply protocol qualifiers to ObjCObjectType. 5298 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5299 // FIXME: Check for protocols to which the class type is already 5300 // known to conform. 5301 5302 return getObjCObjectType(objT->getBaseType(), 5303 objT->getTypeArgsAsWritten(), 5304 protocols, 5305 objT->isKindOfTypeAsWritten()); 5306 } 5307 5308 // If the canonical type is ObjCObjectType, ... 5309 if (type->isObjCObjectType()) { 5310 // Silently overwrite any existing protocol qualifiers. 5311 // TODO: determine whether that's the right thing to do. 5312 5313 // FIXME: Check for protocols to which the class type is already 5314 // known to conform. 5315 return getObjCObjectType(type, {}, protocols, false); 5316 } 5317 5318 // id<protocol-list> 5319 if (type->isObjCIdType()) { 5320 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5321 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5322 objPtr->isKindOfType()); 5323 return getObjCObjectPointerType(type); 5324 } 5325 5326 // Class<protocol-list> 5327 if (type->isObjCClassType()) { 5328 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5329 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5330 objPtr->isKindOfType()); 5331 return getObjCObjectPointerType(type); 5332 } 5333 5334 hasError = true; 5335 return type; 5336 } 5337 5338 QualType 5339 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5340 ArrayRef<ObjCProtocolDecl *> protocols) const { 5341 // Look in the folding set for an existing type. 5342 llvm::FoldingSetNodeID ID; 5343 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5344 void *InsertPos = nullptr; 5345 if (ObjCTypeParamType *TypeParam = 5346 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5347 return QualType(TypeParam, 0); 5348 5349 // We canonicalize to the underlying type. 5350 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5351 if (!protocols.empty()) { 5352 // Apply the protocol qualifers. 
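    // For instance (illustrative): for a type parameter bounded as
    // 'T : NSObject *', a use of 'T' qualified as 'T<NSCopying>' gets the
    // canonical type 'NSObject<NSCopying> *'.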
5353 bool hasError; 5354 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5355 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5356 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5357 } 5358 5359 unsigned size = sizeof(ObjCTypeParamType); 5360 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5361 void *mem = Allocate(size, TypeAlignment); 5362 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5363 5364 Types.push_back(newType); 5365 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5366 return QualType(newType, 0); 5367 } 5368 5369 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5370 ObjCTypeParamDecl *New) const { 5371 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5372 // Update TypeForDecl after updating TypeSourceInfo. 5373 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5374 SmallVector<ObjCProtocolDecl *, 8> protocols; 5375 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5376 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5377 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5378 } 5379 5380 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5381 /// protocol list adopt all protocols in QT's qualified-id protocol 5382 /// list. 5383 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5384 ObjCInterfaceDecl *IC) { 5385 if (!QT->isObjCQualifiedIdType()) 5386 return false; 5387 5388 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5389 // If both the right and left sides have qualifiers. 5390 for (auto *Proto : OPT->quals()) { 5391 if (!IC->ClassImplementsProtocol(Proto, false)) 5392 return false; 5393 } 5394 return true; 5395 } 5396 return false; 5397 } 5398 5399 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5400 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5401 /// of protocols. 5402 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5403 ObjCInterfaceDecl *IDecl) { 5404 if (!QT->isObjCQualifiedIdType()) 5405 return false; 5406 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5407 if (!OPT) 5408 return false; 5409 if (!IDecl->hasDefinition()) 5410 return false; 5411 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5412 CollectInheritedProtocols(IDecl, InheritedProtocols); 5413 if (InheritedProtocols.empty()) 5414 return false; 5415 // Check that if every protocol in list of id<plist> conforms to a protocol 5416 // of IDecl's, then bridge casting is ok. 5417 bool Conforms = false; 5418 for (auto *Proto : OPT->quals()) { 5419 Conforms = false; 5420 for (auto *PI : InheritedProtocols) { 5421 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5422 Conforms = true; 5423 break; 5424 } 5425 } 5426 if (!Conforms) 5427 break; 5428 } 5429 if (Conforms) 5430 return true; 5431 5432 for (auto *PI : InheritedProtocols) { 5433 // If both the right and left sides have qualifiers. 5434 bool Adopts = false; 5435 for (auto *Proto : OPT->quals()) { 5436 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5437 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5438 break; 5439 } 5440 if (!Adopts) 5441 return false; 5442 } 5443 return true; 5444 } 5445 5446 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5447 /// the given object type. 
5448 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const { 5449 llvm::FoldingSetNodeID ID; 5450 ObjCObjectPointerType::Profile(ID, ObjectT); 5451 5452 void *InsertPos = nullptr; 5453 if (ObjCObjectPointerType *QT = 5454 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 5455 return QualType(QT, 0); 5456 5457 // Find the canonical object type. 5458 QualType Canonical; 5459 if (!ObjectT.isCanonical()) { 5460 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT)); 5461 5462 // Regenerate InsertPos. 5463 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 5464 } 5465 5466 // No match. 5467 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment); 5468 auto *QType = 5469 new (Mem) ObjCObjectPointerType(Canonical, ObjectT); 5470 5471 Types.push_back(QType); 5472 ObjCObjectPointerTypes.InsertNode(QType, InsertPos); 5473 return QualType(QType, 0); 5474 } 5475 5476 /// getObjCInterfaceType - Return the unique reference to the type for the 5477 /// specified ObjC interface decl. The list of protocols is optional. 5478 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl, 5479 ObjCInterfaceDecl *PrevDecl) const { 5480 if (Decl->TypeForDecl) 5481 return QualType(Decl->TypeForDecl, 0); 5482 5483 if (PrevDecl) { 5484 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl"); 5485 Decl->TypeForDecl = PrevDecl->TypeForDecl; 5486 return QualType(PrevDecl->TypeForDecl, 0); 5487 } 5488 5489 // Prefer the definition, if there is one. 5490 if (const ObjCInterfaceDecl *Def = Decl->getDefinition()) 5491 Decl = Def; 5492 5493 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment); 5494 auto *T = new (Mem) ObjCInterfaceType(Decl); 5495 Decl->TypeForDecl = T; 5496 Types.push_back(T); 5497 return QualType(T, 0); 5498 } 5499 5500 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique 5501 /// TypeOfExprType AST's (since expression's are never shared). For example, 5502 /// multiple declarations that refer to "typeof(x)" all contain different 5503 /// DeclRefExpr's. This doesn't effect the type checker, since it operates 5504 /// on canonical type's (which are always unique). 5505 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const { 5506 TypeOfExprType *toe; 5507 if (tofExpr->isTypeDependent()) { 5508 llvm::FoldingSetNodeID ID; 5509 DependentTypeOfExprType::Profile(ID, *this, tofExpr); 5510 5511 void *InsertPos = nullptr; 5512 DependentTypeOfExprType *Canon 5513 = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos); 5514 if (Canon) { 5515 // We already have a "canonical" version of an identical, dependent 5516 // typeof(expr) type. Use that as our canonical type. 5517 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, 5518 QualType((TypeOfExprType*)Canon, 0)); 5519 } else { 5520 // Build a new, canonical typeof(expr) type. 5521 Canon 5522 = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr); 5523 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos); 5524 toe = Canon; 5525 } 5526 } else { 5527 QualType Canonical = getCanonicalType(tofExpr->getType()); 5528 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical); 5529 } 5530 Types.push_back(toe); 5531 return QualType(toe, 0); 5532 } 5533 5534 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique 5535 /// TypeOfType nodes. The only motivation to unique these nodes would be 5536 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be 5537 /// an issue. 
This doesn't affect the type checker, since it operates 5538 /// on canonical types (which are always unique). 5539 QualType ASTContext::getTypeOfType(QualType tofType) const { 5540 QualType Canonical = getCanonicalType(tofType); 5541 auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); 5542 Types.push_back(tot); 5543 return QualType(tot, 0); 5544 } 5545 5546 /// getReferenceQualifiedType - Given an expr, will return the type for 5547 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5548 /// and class member access into account. 5549 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5550 // C++11 [dcl.type.simple]p4: 5551 // [...] 5552 QualType T = E->getType(); 5553 switch (E->getValueKind()) { 5554 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5555 // type of e; 5556 case VK_XValue: 5557 return getRValueReferenceType(T); 5558 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5559 // type of e; 5560 case VK_LValue: 5561 return getLValueReferenceType(T); 5562 // - otherwise, decltype(e) is the type of e. 5563 case VK_PRValue: 5564 return T; 5565 } 5566 llvm_unreachable("Unknown value kind"); 5567 } 5568 5569 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5570 /// nodes. This would never be helpful, since each such type has its own 5571 /// expression, and would not give a significant memory saving, since there 5572 /// is an Expr tree under each such type. 5573 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5574 DecltypeType *dt; 5575 5576 // C++11 [temp.type]p2: 5577 // If an expression e involves a template parameter, decltype(e) denotes a 5578 // unique dependent type. Two such decltype-specifiers refer to the same 5579 // type only if their expressions are equivalent (14.5.6.1). 5580 if (e->isInstantiationDependent()) { 5581 llvm::FoldingSetNodeID ID; 5582 DependentDecltypeType::Profile(ID, *this, e); 5583 5584 void *InsertPos = nullptr; 5585 DependentDecltypeType *Canon 5586 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5587 if (!Canon) { 5588 // Build a new, canonical decltype(expr) type. 5589 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); 5590 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5591 } 5592 dt = new (*this, TypeAlignment) 5593 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5594 } else { 5595 dt = new (*this, TypeAlignment) 5596 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5597 } 5598 Types.push_back(dt); 5599 return QualType(dt, 0); 5600 } 5601 5602 /// getUnaryTransformationType - We don't unique these, since the memory 5603 /// savings are minimal and these are rare. 5604 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5605 QualType UnderlyingType, 5606 UnaryTransformType::UTTKind Kind) 5607 const { 5608 UnaryTransformType *ut = nullptr; 5609 5610 if (BaseType->isDependentType()) { 5611 // Look in the folding set for an existing type. 5612 llvm::FoldingSetNodeID ID; 5613 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5614 5615 void *InsertPos = nullptr; 5616 DependentUnaryTransformType *Canon 5617 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5618 5619 if (!Canon) { 5620 // Build a new, canonical __underlying_type(type) type. 
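      // E.g. (illustrative): every occurrence of '__underlying_type(T)' with the
      // same still-dependent 'T' shares this canonical node, since it is profiled
      // only on the canonical base type and the transform kind.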
5621 Canon = new (*this, TypeAlignment) 5622 DependentUnaryTransformType(*this, getCanonicalType(BaseType), 5623 Kind); 5624 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5625 } 5626 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5627 QualType(), Kind, 5628 QualType(Canon, 0)); 5629 } else { 5630 QualType CanonType = getCanonicalType(UnderlyingType); 5631 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5632 UnderlyingType, Kind, 5633 CanonType); 5634 } 5635 Types.push_back(ut); 5636 return QualType(ut, 0); 5637 } 5638 5639 QualType ASTContext::getAutoTypeInternal( 5640 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5641 bool IsPack, ConceptDecl *TypeConstraintConcept, 5642 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5643 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5644 !TypeConstraintConcept && !IsDependent) 5645 return getAutoDeductType(); 5646 5647 // Look in the folding set for an existing type. 5648 void *InsertPos = nullptr; 5649 llvm::FoldingSetNodeID ID; 5650 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5651 TypeConstraintConcept, TypeConstraintArgs); 5652 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5653 return QualType(AT, 0); 5654 5655 QualType Canon; 5656 if (!IsCanon) { 5657 if (DeducedType.isNull()) { 5658 SmallVector<TemplateArgument, 4> CanonArgs; 5659 bool AnyNonCanonArgs = 5660 ::getCanonicalTemplateArguments(*this, TypeConstraintArgs, CanonArgs); 5661 if (AnyNonCanonArgs) { 5662 Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5663 TypeConstraintConcept, CanonArgs, true); 5664 // Find the insert position again. 5665 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5666 } 5667 } else { 5668 Canon = DeducedType.getCanonicalType(); 5669 } 5670 } 5671 5672 void *Mem = Allocate(sizeof(AutoType) + 5673 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5674 TypeAlignment); 5675 auto *AT = new (Mem) AutoType( 5676 DeducedType, Keyword, 5677 (IsDependent ? TypeDependence::DependentInstantiation 5678 : TypeDependence::None) | 5679 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5680 Canon, TypeConstraintConcept, TypeConstraintArgs); 5681 Types.push_back(AT); 5682 AutoTypes.InsertNode(AT, InsertPos); 5683 return QualType(AT, 0); 5684 } 5685 5686 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5687 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5688 /// canonical deduced-but-dependent 'auto' type. 5689 QualType 5690 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5691 bool IsDependent, bool IsPack, 5692 ConceptDecl *TypeConstraintConcept, 5693 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5694 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5695 assert((!IsDependent || DeducedType.isNull()) && 5696 "A dependent auto should be undeduced"); 5697 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5698 TypeConstraintConcept, TypeConstraintArgs); 5699 } 5700 5701 /// Return the uniqued reference to the deduced template specialization type 5702 /// which has been deduced to the given type, or to the canonical undeduced 5703 /// such type, or the canonical deduced-but-dependent such type. 
5704 QualType ASTContext::getDeducedTemplateSpecializationType( 5705 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5706 // Look in the folding set for an existing type. 5707 void *InsertPos = nullptr; 5708 llvm::FoldingSetNodeID ID; 5709 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5710 IsDependent); 5711 if (DeducedTemplateSpecializationType *DTST = 5712 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5713 return QualType(DTST, 0); 5714 5715 auto *DTST = new (*this, TypeAlignment) 5716 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5717 llvm::FoldingSetNodeID TempID; 5718 DTST->Profile(TempID); 5719 assert(ID == TempID && "ID does not match"); 5720 Types.push_back(DTST); 5721 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5722 return QualType(DTST, 0); 5723 } 5724 5725 /// getAtomicType - Return the uniqued reference to the atomic type for 5726 /// the given value type. 5727 QualType ASTContext::getAtomicType(QualType T) const { 5728 // Unique pointers, to guarantee there is only one pointer of a particular 5729 // structure. 5730 llvm::FoldingSetNodeID ID; 5731 AtomicType::Profile(ID, T); 5732 5733 void *InsertPos = nullptr; 5734 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5735 return QualType(AT, 0); 5736 5737 // If the atomic value type isn't canonical, this won't be a canonical type 5738 // either, so fill in the canonical type field. 5739 QualType Canonical; 5740 if (!T.isCanonical()) { 5741 Canonical = getAtomicType(getCanonicalType(T)); 5742 5743 // Get the new insert position for the node we care about. 5744 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5745 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5746 } 5747 auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 5748 Types.push_back(New); 5749 AtomicTypes.InsertNode(New, InsertPos); 5750 return QualType(New, 0); 5751 } 5752 5753 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5754 QualType ASTContext::getAutoDeductType() const { 5755 if (AutoDeductTy.isNull()) 5756 AutoDeductTy = QualType(new (*this, TypeAlignment) 5757 AutoType(QualType(), AutoTypeKeyword::Auto, 5758 TypeDependence::None, QualType(), 5759 /*concept*/ nullptr, /*args*/ {}), 5760 0); 5761 return AutoDeductTy; 5762 } 5763 5764 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5765 QualType ASTContext::getAutoRRefDeductType() const { 5766 if (AutoRRefDeductTy.isNull()) 5767 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5768 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5769 return AutoRRefDeductTy; 5770 } 5771 5772 /// getTagDeclType - Return the unique reference to the type for the 5773 /// specified TagDecl (struct/union/class/enum) decl. 5774 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5775 assert(Decl); 5776 // FIXME: What is the design on getTagDeclType when it requires casting 5777 // away const? mutable? 5778 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5779 } 5780 5781 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5782 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5783 /// needs to agree with the definition in <stddef.h>. 
5784 CanQualType ASTContext::getSizeType() const { 5785 return getFromTargetType(Target->getSizeType()); 5786 } 5787 5788 /// Return the unique signed counterpart of the integer type 5789 /// corresponding to size_t. 5790 CanQualType ASTContext::getSignedSizeType() const { 5791 return getFromTargetType(Target->getSignedSizeType()); 5792 } 5793 5794 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5795 CanQualType ASTContext::getIntMaxType() const { 5796 return getFromTargetType(Target->getIntMaxType()); 5797 } 5798 5799 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5800 CanQualType ASTContext::getUIntMaxType() const { 5801 return getFromTargetType(Target->getUIntMaxType()); 5802 } 5803 5804 /// getSignedWCharType - Return the type of "signed wchar_t". 5805 /// Used when in C++, as a GCC extension. 5806 QualType ASTContext::getSignedWCharType() const { 5807 // FIXME: derive from "Target" ? 5808 return WCharTy; 5809 } 5810 5811 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5812 /// Used when in C++, as a GCC extension. 5813 QualType ASTContext::getUnsignedWCharType() const { 5814 // FIXME: derive from "Target" ? 5815 return UnsignedIntTy; 5816 } 5817 5818 QualType ASTContext::getIntPtrType() const { 5819 return getFromTargetType(Target->getIntPtrType()); 5820 } 5821 5822 QualType ASTContext::getUIntPtrType() const { 5823 return getCorrespondingUnsignedType(getIntPtrType()); 5824 } 5825 5826 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5827 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 5828 QualType ASTContext::getPointerDiffType() const { 5829 return getFromTargetType(Target->getPtrDiffType(0)); 5830 } 5831 5832 /// Return the unique unsigned counterpart of "ptrdiff_t" 5833 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5834 /// in the definition of %tu format specifier. 5835 QualType ASTContext::getUnsignedPointerDiffType() const { 5836 return getFromTargetType(Target->getUnsignedPtrDiffType(0)); 5837 } 5838 5839 /// Return the unique type for "pid_t" defined in 5840 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5841 QualType ASTContext::getProcessIDType() const { 5842 return getFromTargetType(Target->getProcessIDType()); 5843 } 5844 5845 //===----------------------------------------------------------------------===// 5846 // Type Operators 5847 //===----------------------------------------------------------------------===// 5848 5849 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5850 // Push qualifiers into arrays, and then discard any remaining 5851 // qualifiers. 5852 T = getCanonicalType(T); 5853 T = getVariableArrayDecayedType(T); 5854 const Type *Ty = T.getTypePtr(); 5855 QualType Result; 5856 if (isa<ArrayType>(Ty)) { 5857 Result = getArrayDecayedType(QualType(Ty,0)); 5858 } else if (isa<FunctionType>(Ty)) { 5859 Result = getPointerType(QualType(Ty, 0)); 5860 } else { 5861 Result = QualType(Ty, 0); 5862 } 5863 5864 return CanQualType::CreateUnsafe(Result); 5865 } 5866 5867 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5868 Qualifiers &quals) { 5869 SplitQualType splitType = type.getSplitUnqualifiedType(); 5870 5871 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5872 // the unqualified desugared type and then drops it on the floor. 5873 // We then have to strip that sugar back off with 5874 // getUnqualifiedDesugaredType(), which is silly. 
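  // Illustrative sketch of this function's effect (hypothetical caller, assuming
  // an ASTContext &Ctx and a QualType ArrTy spelled 'const int[3]'):
  //   Qualifiers Quals;
  //   QualType Elt = Ctx.getUnqualifiedArrayType(ArrTy, Quals);
  //   // Elt is 'int[3]'; Quals now carries 'const'.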
5875 const auto *AT = 5876 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5877 5878 // If we don't have an array, just use the results in splitType. 5879 if (!AT) { 5880 quals = splitType.Quals; 5881 return QualType(splitType.Ty, 0); 5882 } 5883 5884 // Otherwise, recurse on the array's element type. 5885 QualType elementType = AT->getElementType(); 5886 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 5887 5888 // If that didn't change the element type, AT has no qualifiers, so we 5889 // can just use the results in splitType. 5890 if (elementType == unqualElementType) { 5891 assert(quals.empty()); // from the recursive call 5892 quals = splitType.Quals; 5893 return QualType(splitType.Ty, 0); 5894 } 5895 5896 // Otherwise, add in the qualifiers from the outermost type, then 5897 // build the type back up. 5898 quals.addConsistentQualifiers(splitType.Quals); 5899 5900 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 5901 return getConstantArrayType(unqualElementType, CAT->getSize(), 5902 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 5903 } 5904 5905 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 5906 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 5907 } 5908 5909 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 5910 return getVariableArrayType(unqualElementType, 5911 VAT->getSizeExpr(), 5912 VAT->getSizeModifier(), 5913 VAT->getIndexTypeCVRQualifiers(), 5914 VAT->getBracketsRange()); 5915 } 5916 5917 const auto *DSAT = cast<DependentSizedArrayType>(AT); 5918 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 5919 DSAT->getSizeModifier(), 0, 5920 SourceRange()); 5921 } 5922 5923 /// Attempt to unwrap two types that may both be array types with the same bound 5924 /// (or both be array types of unknown bound) for the purpose of comparing the 5925 /// cv-decomposition of two types per C++ [conv.qual]. 5926 /// 5927 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 5928 /// C++20 [conv.qual], if permitted by the current language mode. 5929 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 5930 bool AllowPiMismatch) { 5931 while (true) { 5932 auto *AT1 = getAsArrayType(T1); 5933 if (!AT1) 5934 return; 5935 5936 auto *AT2 = getAsArrayType(T2); 5937 if (!AT2) 5938 return; 5939 5940 // If we don't have two array types with the same constant bound nor two 5941 // incomplete array types, we've unwrapped everything we can. 5942 // C++20 also permits one type to be a constant array type and the other 5943 // to be an incomplete array type. 5944 // FIXME: Consider also unwrapping array of unknown bound and VLA. 5945 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 5946 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 5947 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 5948 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 5949 isa<IncompleteArrayType>(AT2)))) 5950 return; 5951 } else if (isa<IncompleteArrayType>(AT1)) { 5952 if (!(isa<IncompleteArrayType>(AT2) || 5953 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 5954 isa<ConstantArrayType>(AT2)))) 5955 return; 5956 } else { 5957 return; 5958 } 5959 5960 T1 = AT1->getElementType(); 5961 T2 = AT2->getElementType(); 5962 } 5963 } 5964 5965 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 
5966 /// 5967 /// If T1 and T2 are both pointer types of the same kind, or both array types 5968 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 5969 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 5970 /// 5971 /// This function will typically be called in a loop that successively 5972 /// "unwraps" pointer and pointer-to-member types to compare them at each 5973 /// level. 5974 /// 5975 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 5976 /// C++20 [conv.qual], if permitted by the current language mode. 5977 /// 5978 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 5979 /// pair of types that can't be unwrapped further. 5980 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 5981 bool AllowPiMismatch) { 5982 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 5983 5984 const auto *T1PtrType = T1->getAs<PointerType>(); 5985 const auto *T2PtrType = T2->getAs<PointerType>(); 5986 if (T1PtrType && T2PtrType) { 5987 T1 = T1PtrType->getPointeeType(); 5988 T2 = T2PtrType->getPointeeType(); 5989 return true; 5990 } 5991 5992 const auto *T1MPType = T1->getAs<MemberPointerType>(); 5993 const auto *T2MPType = T2->getAs<MemberPointerType>(); 5994 if (T1MPType && T2MPType && 5995 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 5996 QualType(T2MPType->getClass(), 0))) { 5997 T1 = T1MPType->getPointeeType(); 5998 T2 = T2MPType->getPointeeType(); 5999 return true; 6000 } 6001 6002 if (getLangOpts().ObjC) { 6003 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 6004 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 6005 if (T1OPType && T2OPType) { 6006 T1 = T1OPType->getPointeeType(); 6007 T2 = T2OPType->getPointeeType(); 6008 return true; 6009 } 6010 } 6011 6012 // FIXME: Block pointers, too? 6013 6014 return false; 6015 } 6016 6017 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 6018 while (true) { 6019 Qualifiers Quals; 6020 T1 = getUnqualifiedArrayType(T1, Quals); 6021 T2 = getUnqualifiedArrayType(T2, Quals); 6022 if (hasSameType(T1, T2)) 6023 return true; 6024 if (!UnwrapSimilarTypes(T1, T2)) 6025 return false; 6026 } 6027 } 6028 6029 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 6030 while (true) { 6031 Qualifiers Quals1, Quals2; 6032 T1 = getUnqualifiedArrayType(T1, Quals1); 6033 T2 = getUnqualifiedArrayType(T2, Quals2); 6034 6035 Quals1.removeCVRQualifiers(); 6036 Quals2.removeCVRQualifiers(); 6037 if (Quals1 != Quals2) 6038 return false; 6039 6040 if (hasSameType(T1, T2)) 6041 return true; 6042 6043 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6044 return false; 6045 } 6046 } 6047 6048 DeclarationNameInfo 6049 ASTContext::getNameForTemplate(TemplateName Name, 6050 SourceLocation NameLoc) const { 6051 switch (Name.getKind()) { 6052 case TemplateName::QualifiedTemplate: 6053 case TemplateName::Template: 6054 // DNInfo work in progress: CHECKME: what about DNLoc? 6055 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6056 NameLoc); 6057 6058 case TemplateName::OverloadedTemplate: { 6059 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6060 // DNInfo work in progress: CHECKME: what about DNLoc? 
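    // An overloaded template name denotes a set of function templates found by a
    // single name lookup, so each element carries the same DeclarationName; the
    // first one is as good as any here.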
6061 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6062 } 6063 6064 case TemplateName::AssumedTemplate: { 6065 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6066 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6067 } 6068 6069 case TemplateName::DependentTemplate: { 6070 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6071 DeclarationName DName; 6072 if (DTN->isIdentifier()) { 6073 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6074 return DeclarationNameInfo(DName, NameLoc); 6075 } else { 6076 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6077 // DNInfo work in progress: FIXME: source locations? 6078 DeclarationNameLoc DNLoc = 6079 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6080 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6081 } 6082 } 6083 6084 case TemplateName::SubstTemplateTemplateParm: { 6085 SubstTemplateTemplateParmStorage *subst 6086 = Name.getAsSubstTemplateTemplateParm(); 6087 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6088 NameLoc); 6089 } 6090 6091 case TemplateName::SubstTemplateTemplateParmPack: { 6092 SubstTemplateTemplateParmPackStorage *subst 6093 = Name.getAsSubstTemplateTemplateParmPack(); 6094 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6095 NameLoc); 6096 } 6097 } 6098 6099 llvm_unreachable("bad template name kind!"); 6100 } 6101 6102 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { 6103 switch (Name.getKind()) { 6104 case TemplateName::QualifiedTemplate: 6105 case TemplateName::Template: { 6106 TemplateDecl *Template = Name.getAsTemplateDecl(); 6107 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6108 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6109 6110 // The canonical template name is the canonical template declaration. 
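    // For example (illustrative): the names written 'ns::vector' and plain
    // 'vector' both canonicalize to the same canonical ClassTemplateDecl, which
    // is what hasSameTemplateName() below relies on.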
6111 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6112 } 6113 6114 case TemplateName::OverloadedTemplate: 6115 case TemplateName::AssumedTemplate: 6116 llvm_unreachable("cannot canonicalize unresolved template"); 6117 6118 case TemplateName::DependentTemplate: { 6119 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6120 assert(DTN && "Non-dependent template names must refer to template decls."); 6121 return DTN->CanonicalTemplateName; 6122 } 6123 6124 case TemplateName::SubstTemplateTemplateParm: { 6125 SubstTemplateTemplateParmStorage *subst 6126 = Name.getAsSubstTemplateTemplateParm(); 6127 return getCanonicalTemplateName(subst->getReplacement()); 6128 } 6129 6130 case TemplateName::SubstTemplateTemplateParmPack: { 6131 SubstTemplateTemplateParmPackStorage *subst 6132 = Name.getAsSubstTemplateTemplateParmPack(); 6133 TemplateTemplateParmDecl *canonParameter 6134 = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); 6135 TemplateArgument canonArgPack 6136 = getCanonicalTemplateArgument(subst->getArgumentPack()); 6137 return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); 6138 } 6139 } 6140 6141 llvm_unreachable("bad template name!"); 6142 } 6143 6144 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { 6145 X = getCanonicalTemplateName(X); 6146 Y = getCanonicalTemplateName(Y); 6147 return X.getAsVoidPointer() == Y.getAsVoidPointer(); 6148 } 6149 6150 TemplateArgument 6151 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6152 switch (Arg.getKind()) { 6153 case TemplateArgument::Null: 6154 return Arg; 6155 6156 case TemplateArgument::Expression: 6157 return Arg; 6158 6159 case TemplateArgument::Declaration: { 6160 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6161 return TemplateArgument(D, Arg.getParamTypeForDecl()); 6162 } 6163 6164 case TemplateArgument::NullPtr: 6165 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6166 /*isNullPtr*/true); 6167 6168 case TemplateArgument::Template: 6169 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); 6170 6171 case TemplateArgument::TemplateExpansion: 6172 return TemplateArgument(getCanonicalTemplateName( 6173 Arg.getAsTemplateOrTemplatePattern()), 6174 Arg.getNumTemplateExpansions()); 6175 6176 case TemplateArgument::Integral: 6177 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6178 6179 case TemplateArgument::Type: 6180 return TemplateArgument(getCanonicalType(Arg.getAsType())); 6181 6182 case TemplateArgument::Pack: { 6183 if (Arg.pack_size() == 0) 6184 return Arg; 6185 6186 auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()]; 6187 unsigned Idx = 0; 6188 for (TemplateArgument::pack_iterator A = Arg.pack_begin(), 6189 AEnd = Arg.pack_end(); 6190 A != AEnd; (void)++A, ++Idx) 6191 CanonArgs[Idx] = getCanonicalTemplateArgument(*A); 6192 6193 return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size())); 6194 } 6195 } 6196 6197 // Silence GCC warning 6198 llvm_unreachable("Unhandled template argument kind"); 6199 } 6200 6201 NestedNameSpecifier * 6202 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6203 if (!NNS) 6204 return nullptr; 6205 6206 switch (NNS->getKind()) { 6207 case NestedNameSpecifier::Identifier: 6208 // Canonicalize the prefix but keep the identifier the same. 
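    // For example (illustrative): for a specifier written 'T::type::', the
    // identifier 'type' is preserved while the dependent prefix 'T::' is
    // canonicalized.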
6209 return NestedNameSpecifier::Create(*this, 6210 getCanonicalNestedNameSpecifier(NNS->getPrefix()), 6211 NNS->getAsIdentifier()); 6212 6213 case NestedNameSpecifier::Namespace: 6214 // A namespace is canonical; build a nested-name-specifier with 6215 // this namespace and no prefix. 6216 return NestedNameSpecifier::Create(*this, nullptr, 6217 NNS->getAsNamespace()->getOriginalNamespace()); 6218 6219 case NestedNameSpecifier::NamespaceAlias: 6220 // A namespace alias is canonicalized to the namespace it names; build a 6221 // nested-name-specifier with that namespace and no prefix. 6222 return NestedNameSpecifier::Create(*this, nullptr, 6223 NNS->getAsNamespaceAlias()->getNamespace() 6224 ->getOriginalNamespace()); 6225 6226 // The difference between TypeSpec and TypeSpecWithTemplate is that the 6227 // latter will have the 'template' keyword when printed. 6228 case NestedNameSpecifier::TypeSpec: 6229 case NestedNameSpecifier::TypeSpecWithTemplate: { 6230 const Type *T = getCanonicalType(NNS->getAsType()); 6231 6232 // If we have some kind of dependent-named type (e.g., "typename T::type"), 6233 // break it apart into its prefix and identifier, then reconstitute those 6234 // as the canonical nested-name-specifier. This is required to canonicalize 6235 // a dependent nested-name-specifier involving typedefs of dependent-name 6236 // types, e.g., 6237 // typedef typename T::type T1; 6238 // typedef typename T1::type T2; 6239 if (const auto *DNT = T->getAs<DependentNameType>()) 6240 return NestedNameSpecifier::Create( 6241 *this, DNT->getQualifier(), 6242 const_cast<IdentifierInfo *>(DNT->getIdentifier())); 6243 if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>()) 6244 return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true, 6245 const_cast<Type *>(T)); 6246 6247 // TODO: Set 'Template' parameter to true for other template types. 6248 return NestedNameSpecifier::Create(*this, nullptr, false, 6249 const_cast<Type *>(T)); 6250 } 6251 6252 case NestedNameSpecifier::Global: 6253 case NestedNameSpecifier::Super: 6254 // The global specifier and __super specifier are canonical and unique. 6255 return NNS; 6256 } 6257 6258 llvm_unreachable("Invalid NestedNameSpecifier::Kind!"); 6259 } 6260 6261 const ArrayType *ASTContext::getAsArrayType(QualType T) const { 6262 // Handle the non-qualified case efficiently. 6263 if (!T.hasLocalQualifiers()) { 6264 // Handle the common positive case fast. 6265 if (const auto *AT = dyn_cast<ArrayType>(T)) 6266 return AT; 6267 } 6268 6269 // Handle the common negative case fast. 6270 if (!isa<ArrayType>(T.getCanonicalType())) 6271 return nullptr; 6272 6273 // Apply any qualifiers from the array type to the element type. This 6274 // implements C99 6.7.3p8: "If the specification of an array type includes 6275 // any type qualifiers, the element type is so qualified, not the array type." 6276 6277 // If we get here, we either have type qualifiers on the type, or we have 6278 // sugar such as a typedef in the way. If we have type qualifiers on the type, 6279 // we must propagate them down into the element type. 6280 6281 SplitQualType split = T.getSplitDesugaredType(); 6282 Qualifiers qs = split.Quals; 6283 6284 // If we have a simple case, just return now. 6285 const auto *ATy = dyn_cast<ArrayType>(split.Ty); 6286 if (!ATy || qs.empty()) 6287 return ATy; 6288 6289 // Otherwise, we have an array and we have qualifiers on it. Push the 6290 // qualifiers into the array element type and return a new array type.
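  // Illustrative note (hypothetical types, not used in this file): given
  // 'typedef int A[4];', calling getAsArrayType on 'const A' yields a
  // ConstantArrayType whose element type is 'const int' (C99 6.7.3p8),
  // rather than a const-qualified array type.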
6291 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6292 6293 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6294 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6295 CAT->getSizeExpr(), 6296 CAT->getSizeModifier(), 6297 CAT->getIndexTypeCVRQualifiers())); 6298 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6299 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6300 IAT->getSizeModifier(), 6301 IAT->getIndexTypeCVRQualifiers())); 6302 6303 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6304 return cast<ArrayType>( 6305 getDependentSizedArrayType(NewEltTy, 6306 DSAT->getSizeExpr(), 6307 DSAT->getSizeModifier(), 6308 DSAT->getIndexTypeCVRQualifiers(), 6309 DSAT->getBracketsRange())); 6310 6311 const auto *VAT = cast<VariableArrayType>(ATy); 6312 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6313 VAT->getSizeExpr(), 6314 VAT->getSizeModifier(), 6315 VAT->getIndexTypeCVRQualifiers(), 6316 VAT->getBracketsRange())); 6317 } 6318 6319 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6320 if (T->isArrayType() || T->isFunctionType()) 6321 return getDecayedType(T); 6322 return T; 6323 } 6324 6325 QualType ASTContext::getSignatureParameterType(QualType T) const { 6326 T = getVariableArrayDecayedType(T); 6327 T = getAdjustedParameterType(T); 6328 return T.getUnqualifiedType(); 6329 } 6330 6331 QualType ASTContext::getExceptionObjectType(QualType T) const { 6332 // C++ [except.throw]p3: 6333 // A throw-expression initializes a temporary object, called the exception 6334 // object, the type of which is determined by removing any top-level 6335 // cv-qualifiers from the static type of the operand of throw and adjusting 6336 // the type from "array of T" or "function returning T" to "pointer to T" 6337 // or "pointer to function returning T", [...] 6338 T = getVariableArrayDecayedType(T); 6339 if (T->isArrayType() || T->isFunctionType()) 6340 T = getDecayedType(T); 6341 return T.getUnqualifiedType(); 6342 } 6343 6344 /// getArrayDecayedType - Return the properly qualified result of decaying the 6345 /// specified array type to a pointer. This operation is non-trivial when 6346 /// handling typedefs etc. The canonical type of "T" must be an array type, 6347 /// this returns a pointer to a properly qualified element of the array. 6348 /// 6349 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6350 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6351 // Get the element type with 'getAsArrayType' so that we don't lose any 6352 // typedefs in the element type of the array. This also handles propagation 6353 // of type qualifiers from the array type into the element type if present 6354 // (C99 6.7.3p8). 
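  // A minimal usage sketch (hypothetical caller; 'Ctx', 'ArrTy', and
  // 'decayParam' are illustrative names, not part of this file):
  //
  //   QualType decayParam(ASTContext &Ctx, QualType ArrTy) {
  //     assert(ArrTy->isArrayType() && "caller must pass an array type");
  //     return Ctx.getArrayDecayedType(ArrTy); // e.g. 'int[4]' -> 'int *'
  //   }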
6355 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6356 assert(PrettyArrayType && "Not an array type!"); 6357 6358 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6359 6360 // int x[restrict 4] -> int *restrict 6361 QualType Result = getQualifiedType(PtrTy, 6362 PrettyArrayType->getIndexTypeQualifiers()); 6363 6364 // int x[_Nullable] -> int * _Nullable 6365 if (auto Nullability = Ty->getNullability(*this)) { 6366 Result = const_cast<ASTContext *>(this)->getAttributedType( 6367 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6368 } 6369 return Result; 6370 } 6371 6372 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6373 return getBaseElementType(array->getElementType()); 6374 } 6375 6376 QualType ASTContext::getBaseElementType(QualType type) const { 6377 Qualifiers qs; 6378 while (true) { 6379 SplitQualType split = type.getSplitDesugaredType(); 6380 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6381 if (!array) break; 6382 6383 type = array->getElementType(); 6384 qs.addConsistentQualifiers(split.Quals); 6385 } 6386 6387 return getQualifiedType(type, qs); 6388 } 6389 6390 /// getConstantArrayElementCount - Returns number of constant array elements. 6391 uint64_t 6392 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6393 uint64_t ElementCount = 1; 6394 do { 6395 ElementCount *= CA->getSize().getZExtValue(); 6396 CA = dyn_cast_or_null<ConstantArrayType>( 6397 CA->getElementType()->getAsArrayTypeUnsafe()); 6398 } while (CA); 6399 return ElementCount; 6400 } 6401 6402 /// getFloatingRank - Return a relative rank for floating point types. 6403 /// This routine will assert if passed a built-in type that isn't a float. 6404 static FloatingRank getFloatingRank(QualType T) { 6405 if (const auto *CT = T->getAs<ComplexType>()) 6406 return getFloatingRank(CT->getElementType()); 6407 6408 switch (T->castAs<BuiltinType>()->getKind()) { 6409 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6410 case BuiltinType::Float16: return Float16Rank; 6411 case BuiltinType::Half: return HalfRank; 6412 case BuiltinType::Float: return FloatRank; 6413 case BuiltinType::Double: return DoubleRank; 6414 case BuiltinType::LongDouble: return LongDoubleRank; 6415 case BuiltinType::Float128: return Float128Rank; 6416 case BuiltinType::BFloat16: return BFloat16Rank; 6417 case BuiltinType::Ibm128: return Ibm128Rank; 6418 } 6419 } 6420 6421 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating 6422 /// point or a complex type (based on typeDomain/typeSize). 6423 /// 'typeDomain' is a real floating point or complex type. 6424 /// 'typeSize' is a real floating point or complex type. 
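/// For example (illustrative): a Size of 'float' with a Domain of
/// '_Complex double' yields '_Complex float', while the same Size with a
/// Domain of 'long double' yields plain 'float'.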
6425 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, 6426 QualType Domain) const { 6427 FloatingRank EltRank = getFloatingRank(Size); 6428 if (Domain->isComplexType()) { 6429 switch (EltRank) { 6430 case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported"); 6431 case Float16Rank: 6432 case HalfRank: llvm_unreachable("Complex half is not supported"); 6433 case Ibm128Rank: return getComplexType(Ibm128Ty); 6434 case FloatRank: return getComplexType(FloatTy); 6435 case DoubleRank: return getComplexType(DoubleTy); 6436 case LongDoubleRank: return getComplexType(LongDoubleTy); 6437 case Float128Rank: return getComplexType(Float128Ty); 6438 } 6439 } 6440 6441 assert(Domain->isRealFloatingType() && "Unknown domain!"); 6442 switch (EltRank) { 6443 case Float16Rank: return HalfTy; 6444 case BFloat16Rank: return BFloat16Ty; 6445 case HalfRank: return HalfTy; 6446 case FloatRank: return FloatTy; 6447 case DoubleRank: return DoubleTy; 6448 case LongDoubleRank: return LongDoubleTy; 6449 case Float128Rank: return Float128Ty; 6450 case Ibm128Rank: 6451 return Ibm128Ty; 6452 } 6453 llvm_unreachable("getFloatingRank(): illegal value for rank"); 6454 } 6455 6456 /// getFloatingTypeOrder - Compare the rank of the two specified floating 6457 /// point types, ignoring the domain of the type (i.e. 'double' == 6458 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 6459 /// LHS < RHS, return -1. 6460 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 6461 FloatingRank LHSR = getFloatingRank(LHS); 6462 FloatingRank RHSR = getFloatingRank(RHS); 6463 6464 if (LHSR == RHSR) 6465 return 0; 6466 if (LHSR > RHSR) 6467 return 1; 6468 return -1; 6469 } 6470 6471 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 6472 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 6473 return 0; 6474 return getFloatingTypeOrder(LHS, RHS); 6475 } 6476 6477 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 6478 /// routine will assert if passed a built-in type that isn't an integer or enum, 6479 /// or if it is not canonicalized. 6480 unsigned ASTContext::getIntegerRank(const Type *T) const { 6481 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 6482 6483 // Results in this 'losing' to any type of the same size, but winning if 6484 // larger. 6485 if (const auto *EIT = dyn_cast<BitIntType>(T)) 6486 return 0 + (EIT->getNumBits() << 3); 6487 6488 switch (cast<BuiltinType>(T)->getKind()) { 6489 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 6490 case BuiltinType::Bool: 6491 return 1 + (getIntWidth(BoolTy) << 3); 6492 case BuiltinType::Char_S: 6493 case BuiltinType::Char_U: 6494 case BuiltinType::SChar: 6495 case BuiltinType::UChar: 6496 return 2 + (getIntWidth(CharTy) << 3); 6497 case BuiltinType::Short: 6498 case BuiltinType::UShort: 6499 return 3 + (getIntWidth(ShortTy) << 3); 6500 case BuiltinType::Int: 6501 case BuiltinType::UInt: 6502 return 4 + (getIntWidth(IntTy) << 3); 6503 case BuiltinType::Long: 6504 case BuiltinType::ULong: 6505 return 5 + (getIntWidth(LongTy) << 3); 6506 case BuiltinType::LongLong: 6507 case BuiltinType::ULongLong: 6508 return 6 + (getIntWidth(LongLongTy) << 3); 6509 case BuiltinType::Int128: 6510 case BuiltinType::UInt128: 6511 return 7 + (getIntWidth(Int128Ty) << 3); 6512 } 6513 } 6514 6515 /// Whether this is a promotable bitfield reference according 6516 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 
6517 /// 6518 /// \returns the type this bit-field will promote to, or NULL if no 6519 /// promotion occurs. 6520 QualType ASTContext::isPromotableBitField(Expr *E) const { 6521 if (E->isTypeDependent() || E->isValueDependent()) 6522 return {}; 6523 6524 // C++ [conv.prom]p5: 6525 // If the bit-field has an enumerated type, it is treated as any other 6526 // value of that type for promotion purposes. 6527 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 6528 return {}; 6529 6530 // FIXME: We should not do this unless E->refersToBitField() is true. This 6531 // matters in C where getSourceBitField() will find bit-fields for various 6532 // cases where the source expression is not a bit-field designator. 6533 6534 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 6535 if (!Field) 6536 return {}; 6537 6538 QualType FT = Field->getType(); 6539 6540 uint64_t BitWidth = Field->getBitWidthValue(*this); 6541 uint64_t IntSize = getTypeSize(IntTy); 6542 // C++ [conv.prom]p5: 6543 // A prvalue for an integral bit-field can be converted to a prvalue of type 6544 // int if int can represent all the values of the bit-field; otherwise, it 6545 // can be converted to unsigned int if unsigned int can represent all the 6546 // values of the bit-field. If the bit-field is larger yet, no integral 6547 // promotion applies to it. 6548 // C11 6.3.1.1/2: 6549 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 6550 // If an int can represent all values of the original type (as restricted by 6551 // the width, for a bit-field), the value is converted to an int; otherwise, 6552 // it is converted to an unsigned int. 6553 // 6554 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 6555 // We perform that promotion here to match GCC and C++. 6556 // FIXME: C does not permit promotion of an enum bit-field whose rank is 6557 // greater than that of 'int'. We perform that promotion to match GCC. 6558 if (BitWidth < IntSize) 6559 return IntTy; 6560 6561 if (BitWidth == IntSize) 6562 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 6563 6564 // Bit-fields wider than int are not subject to promotions, and therefore act 6565 // like the base type. GCC has some weird bugs in this area that we 6566 // deliberately do not follow (GCC follows a pre-standard resolution to 6567 // C's DR315 which treats bit-width as being part of the type, and this leaks 6568 // into their semantics in some cases). 6569 return {}; 6570 } 6571 6572 /// getPromotedIntegerType - Returns the type that Promotable will 6573 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 6574 /// integer type. 6575 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 6576 assert(!Promotable.isNull()); 6577 assert(Promotable->isPromotableIntegerType()); 6578 if (const auto *ET = Promotable->getAs<EnumType>()) 6579 return ET->getDecl()->getPromotionType(); 6580 6581 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 6582 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 6583 // (3.9.1) can be converted to a prvalue of the first of the following 6584 // types that can represent all the values of its underlying type: 6585 // int, unsigned int, long int, unsigned long int, long long int, or 6586 // unsigned long long int [...] 6587 // FIXME: Is there some better way to compute this? 
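    // Illustrative (target-dependent): with a 32-bit 'int', char16_t
    // (16 bits) promotes to 'int', while char32_t (32 bits, unsigned)
    // promotes to 'unsigned int', the first type in the list below that can
    // represent all of its values.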
6588 if (BT->getKind() == BuiltinType::WChar_S || 6589 BT->getKind() == BuiltinType::WChar_U || 6590 BT->getKind() == BuiltinType::Char8 || 6591 BT->getKind() == BuiltinType::Char16 || 6592 BT->getKind() == BuiltinType::Char32) { 6593 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 6594 uint64_t FromSize = getTypeSize(BT); 6595 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 6596 LongLongTy, UnsignedLongLongTy }; 6597 for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { 6598 uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); 6599 if (FromSize < ToSize || 6600 (FromSize == ToSize && 6601 FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) 6602 return PromoteTypes[Idx]; 6603 } 6604 llvm_unreachable("char type should fit into long long"); 6605 } 6606 } 6607 6608 // At this point, we should have a signed or unsigned integer type. 6609 if (Promotable->isSignedIntegerType()) 6610 return IntTy; 6611 uint64_t PromotableSize = getIntWidth(Promotable); 6612 uint64_t IntSize = getIntWidth(IntTy); 6613 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 6614 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 6615 } 6616 6617 /// Recurses in pointer/array types until it finds an objc retainable 6618 /// type and returns its ownership. 6619 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 6620 while (!T.isNull()) { 6621 if (T.getObjCLifetime() != Qualifiers::OCL_None) 6622 return T.getObjCLifetime(); 6623 if (T->isArrayType()) 6624 T = getBaseElementType(T); 6625 else if (const auto *PT = T->getAs<PointerType>()) 6626 T = PT->getPointeeType(); 6627 else if (const auto *RT = T->getAs<ReferenceType>()) 6628 T = RT->getPointeeType(); 6629 else 6630 break; 6631 } 6632 6633 return Qualifiers::OCL_None; 6634 } 6635 6636 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 6637 // Incomplete enum types are not treated as integer types. 6638 // FIXME: In C++, enum types are never integer types. 6639 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 6640 return ET->getDecl()->getIntegerType().getTypePtr(); 6641 return nullptr; 6642 } 6643 6644 /// getIntegerTypeOrder - Returns the highest ranked integer type: 6645 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 6646 /// LHS < RHS, return -1. 6647 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 6648 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 6649 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 6650 6651 // Unwrap enums to their underlying type. 6652 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 6653 LHSC = getIntegerTypeForEnum(ET); 6654 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 6655 RHSC = getIntegerTypeForEnum(ET); 6656 6657 if (LHSC == RHSC) return 0; 6658 6659 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 6660 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 6661 6662 unsigned LHSRank = getIntegerRank(LHSC); 6663 unsigned RHSRank = getIntegerRank(RHSC); 6664 6665 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 6666 if (LHSRank == RHSRank) return 0; 6667 return LHSRank > RHSRank ? 1 : -1; 6668 } 6669 6670 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 6671 if (LHSUnsigned) { 6672 // If the unsigned [LHS] type is larger, return it. 6673 if (LHSRank >= RHSRank) 6674 return 1; 6675 6676 // If the signed type can represent all values of the unsigned type, it 6677 // wins. 
Because we are dealing with 2's complement and types that are 6678 // powers of two larger than each other, this is always safe. 6679 return -1; 6680 } 6681 6682 // If the unsigned [RHS] type is larger, return it. 6683 if (RHSRank >= LHSRank) 6684 return -1; 6685 6686 // If the signed type can represent all values of the unsigned type, it 6687 // wins. Because we are dealing with 2's complement and types that are 6688 // powers of two larger than each other, this is always safe. 6689 return 1; 6690 } 6691 6692 TypedefDecl *ASTContext::getCFConstantStringDecl() const { 6693 if (CFConstantStringTypeDecl) 6694 return CFConstantStringTypeDecl; 6695 6696 assert(!CFConstantStringTagDecl && 6697 "tag and typedef should be initialized together"); 6698 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); 6699 CFConstantStringTagDecl->startDefinition(); 6700 6701 struct { 6702 QualType Type; 6703 const char *Name; 6704 } Fields[5]; 6705 unsigned Count = 0; 6706 6707 /// Objective-C ABI 6708 /// 6709 /// typedef struct __NSConstantString_tag { 6710 /// const int *isa; 6711 /// int flags; 6712 /// const char *str; 6713 /// long length; 6714 /// } __NSConstantString; 6715 /// 6716 /// Swift ABI (4.1, 4.2) 6717 /// 6718 /// typedef struct __NSConstantString_tag { 6719 /// uintptr_t _cfisa; 6720 /// uintptr_t _swift_rc; 6721 /// _Atomic(uint64_t) _cfinfoa; 6722 /// const char *_ptr; 6723 /// uint32_t _length; 6724 /// } __NSConstantString; 6725 /// 6726 /// Swift ABI (5.0) 6727 /// 6728 /// typedef struct __NSConstantString_tag { 6729 /// uintptr_t _cfisa; 6730 /// uintptr_t _swift_rc; 6731 /// _Atomic(uint64_t) _cfinfoa; 6732 /// const char *_ptr; 6733 /// uintptr_t _length; 6734 /// } __NSConstantString; 6735 6736 const auto CFRuntime = getLangOpts().CFRuntime; 6737 if (static_cast<unsigned>(CFRuntime) < 6738 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { 6739 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; 6740 Fields[Count++] = { IntTy, "flags" }; 6741 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; 6742 Fields[Count++] = { LongTy, "length" }; 6743 } else { 6744 Fields[Count++] = { getUIntPtrType(), "_cfisa" }; 6745 Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; 6746 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_cfinfoa" }; 6747 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; 6748 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || 6749 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) 6750 Fields[Count++] = { IntTy, "_length" }; 6751 else 6752 Fields[Count++] = { getUIntPtrType(), "_length" }; 6753 } 6754 6755 // Create the fields that match the selected ABI. 6756 for (unsigned i = 0; i < Count; ++i) { 6757 FieldDecl *Field = 6758 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), 6759 SourceLocation(), &Idents.get(Fields[i].Name), 6760 Fields[i].Type, /*TInfo=*/nullptr, 6761 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 6762 Field->setAccess(AS_public); 6763 CFConstantStringTagDecl->addDecl(Field); 6764 } 6765 6766 CFConstantStringTagDecl->completeDefinition(); 6767 // This type is designed to be compatible with NSConstantString, but cannot 6768 // use the same name, since NSConstantString is an interface.
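  // Illustrative: the declarations built by this function correspond roughly
  // to
  //
  //   struct __NSConstantString_tag { /* fields per the ABI selected above */ };
  //   typedef struct __NSConstantString_tag __NSConstantString;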
6769 auto tagType = getTagDeclType(CFConstantStringTagDecl); 6770 CFConstantStringTypeDecl = 6771 buildImplicitTypedef(tagType, "__NSConstantString"); 6772 6773 return CFConstantStringTypeDecl; 6774 } 6775 6776 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 6777 if (!CFConstantStringTagDecl) 6778 getCFConstantStringDecl(); // Build the tag and the typedef. 6779 return CFConstantStringTagDecl; 6780 } 6781 6782 // getCFConstantStringType - Return the type used for constant CFStrings. 6783 QualType ASTContext::getCFConstantStringType() const { 6784 return getTypedefType(getCFConstantStringDecl()); 6785 } 6786 6787 QualType ASTContext::getObjCSuperType() const { 6788 if (ObjCSuperType.isNull()) { 6789 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 6790 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 6791 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 6792 } 6793 return ObjCSuperType; 6794 } 6795 6796 void ASTContext::setCFConstantStringType(QualType T) { 6797 const auto *TD = T->castAs<TypedefType>(); 6798 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 6799 const auto *TagType = 6800 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 6801 CFConstantStringTagDecl = TagType->getDecl(); 6802 } 6803 6804 QualType ASTContext::getBlockDescriptorType() const { 6805 if (BlockDescriptorType) 6806 return getTagDeclType(BlockDescriptorType); 6807 6808 RecordDecl *RD; 6809 // FIXME: Needs the FlagAppleBlock bit. 6810 RD = buildImplicitRecord("__block_descriptor"); 6811 RD->startDefinition(); 6812 6813 QualType FieldTypes[] = { 6814 UnsignedLongTy, 6815 UnsignedLongTy, 6816 }; 6817 6818 static const char *const FieldNames[] = { 6819 "reserved", 6820 "Size" 6821 }; 6822 6823 for (size_t i = 0; i < 2; ++i) { 6824 FieldDecl *Field = FieldDecl::Create( 6825 *this, RD, SourceLocation(), SourceLocation(), 6826 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 6827 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 6828 Field->setAccess(AS_public); 6829 RD->addDecl(Field); 6830 } 6831 6832 RD->completeDefinition(); 6833 6834 BlockDescriptorType = RD; 6835 6836 return getTagDeclType(BlockDescriptorType); 6837 } 6838 6839 QualType ASTContext::getBlockDescriptorExtendedType() const { 6840 if (BlockDescriptorExtendedType) 6841 return getTagDeclType(BlockDescriptorExtendedType); 6842 6843 RecordDecl *RD; 6844 // FIXME: Needs the FlagAppleBlock bit. 
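  // The record built below corresponds roughly to this sketch:
  //
  //   struct __block_descriptor_withcopydispose {
  //     unsigned long reserved;
  //     unsigned long Size;
  //     void **CopyFuncPtr;
  //     void **DestroyFuncPtr;
  //   };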
6845 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 6846 RD->startDefinition(); 6847 6848 QualType FieldTypes[] = { 6849 UnsignedLongTy, 6850 UnsignedLongTy, 6851 getPointerType(VoidPtrTy), 6852 getPointerType(VoidPtrTy) 6853 }; 6854 6855 static const char *const FieldNames[] = { 6856 "reserved", 6857 "Size", 6858 "CopyFuncPtr", 6859 "DestroyFuncPtr" 6860 }; 6861 6862 for (size_t i = 0; i < 4; ++i) { 6863 FieldDecl *Field = FieldDecl::Create( 6864 *this, RD, SourceLocation(), SourceLocation(), 6865 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 6866 /*BitWidth=*/nullptr, 6867 /*Mutable=*/false, ICIS_NoInit); 6868 Field->setAccess(AS_public); 6869 RD->addDecl(Field); 6870 } 6871 6872 RD->completeDefinition(); 6873 6874 BlockDescriptorExtendedType = RD; 6875 return getTagDeclType(BlockDescriptorExtendedType); 6876 } 6877 6878 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 6879 const auto *BT = dyn_cast<BuiltinType>(T); 6880 6881 if (!BT) { 6882 if (isa<PipeType>(T)) 6883 return OCLTK_Pipe; 6884 6885 return OCLTK_Default; 6886 } 6887 6888 switch (BT->getKind()) { 6889 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 6890 case BuiltinType::Id: \ 6891 return OCLTK_Image; 6892 #include "clang/Basic/OpenCLImageTypes.def" 6893 6894 case BuiltinType::OCLClkEvent: 6895 return OCLTK_ClkEvent; 6896 6897 case BuiltinType::OCLEvent: 6898 return OCLTK_Event; 6899 6900 case BuiltinType::OCLQueue: 6901 return OCLTK_Queue; 6902 6903 case BuiltinType::OCLReserveID: 6904 return OCLTK_ReserveID; 6905 6906 case BuiltinType::OCLSampler: 6907 return OCLTK_Sampler; 6908 6909 default: 6910 return OCLTK_Default; 6911 } 6912 } 6913 6914 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 6915 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 6916 } 6917 6918 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 6919 /// requires copy/dispose. Note that this must match the logic 6920 /// in buildByrefHelpers. 6921 bool ASTContext::BlockRequiresCopying(QualType Ty, 6922 const VarDecl *D) { 6923 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 6924 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 6925 if (!copyExpr && record->hasTrivialDestructor()) return false; 6926 6927 return true; 6928 } 6929 6930 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 6931 // move or destroy. 6932 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 6933 return true; 6934 6935 if (!Ty->isObjCRetainableType()) return false; 6936 6937 Qualifiers qs = Ty.getQualifiers(); 6938 6939 // If we have lifetime, that dominates. 6940 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 6941 switch (lifetime) { 6942 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 6943 6944 // These are just bits as far as the runtime is concerned. 6945 case Qualifiers::OCL_ExplicitNone: 6946 case Qualifiers::OCL_Autoreleasing: 6947 return false; 6948 6949 // These cases should have been taken care of when checking the type's 6950 // non-triviality. 
6951 case Qualifiers::OCL_Weak: 6952 case Qualifiers::OCL_Strong: 6953 llvm_unreachable("impossible"); 6954 } 6955 llvm_unreachable("fell out of lifetime switch!"); 6956 } 6957 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 6958 Ty->isObjCObjectPointerType()); 6959 } 6960 6961 bool ASTContext::getByrefLifetime(QualType Ty, 6962 Qualifiers::ObjCLifetime &LifeTime, 6963 bool &HasByrefExtendedLayout) const { 6964 if (!getLangOpts().ObjC || 6965 getLangOpts().getGC() != LangOptions::NonGC) 6966 return false; 6967 6968 HasByrefExtendedLayout = false; 6969 if (Ty->isRecordType()) { 6970 HasByrefExtendedLayout = true; 6971 LifeTime = Qualifiers::OCL_None; 6972 } else if ((LifeTime = Ty.getObjCLifetime())) { 6973 // Honor the ARC qualifiers. 6974 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 6975 // The MRR rule. 6976 LifeTime = Qualifiers::OCL_ExplicitNone; 6977 } else { 6978 LifeTime = Qualifiers::OCL_None; 6979 } 6980 return true; 6981 } 6982 6983 CanQualType ASTContext::getNSUIntegerType() const { 6984 assert(Target && "Expected target to be initialized"); 6985 const llvm::Triple &T = Target->getTriple(); 6986 // Windows is LLP64 rather than LP64 6987 if (T.isOSWindows() && T.isArch64Bit()) 6988 return UnsignedLongLongTy; 6989 return UnsignedLongTy; 6990 } 6991 6992 CanQualType ASTContext::getNSIntegerType() const { 6993 assert(Target && "Expected target to be initialized"); 6994 const llvm::Triple &T = Target->getTriple(); 6995 // Windows is LLP64 rather than LP64 6996 if (T.isOSWindows() && T.isArch64Bit()) 6997 return LongLongTy; 6998 return LongTy; 6999 } 7000 7001 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 7002 if (!ObjCInstanceTypeDecl) 7003 ObjCInstanceTypeDecl = 7004 buildImplicitTypedef(getObjCIdType(), "instancetype"); 7005 return ObjCInstanceTypeDecl; 7006 } 7007 7008 // This returns true if a type has been typedefed to BOOL: 7009 // typedef <type> BOOL; 7010 static bool isTypeTypedefedAsBOOL(QualType T) { 7011 if (const auto *TT = dyn_cast<TypedefType>(T)) 7012 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 7013 return II->isStr("BOOL"); 7014 7015 return false; 7016 } 7017 7018 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 7019 /// purpose. 7020 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 7021 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 7022 return CharUnits::Zero(); 7023 7024 CharUnits sz = getTypeSizeInChars(type); 7025 7026 // Make all integer and enum types at least as large as an int 7027 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 7028 sz = std::max(sz, getTypeSizeInChars(IntTy)); 7029 // Treat arrays as pointers, since that's how they're passed in. 7030 else if (type->isArrayType()) 7031 sz = getTypeSizeInChars(VoidPtrTy); 7032 return sz; 7033 } 7034 7035 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 7036 return getTargetInfo().getCXXABI().isMicrosoft() && 7037 VD->isStaticDataMember() && 7038 VD->getType()->isIntegralOrEnumerationType() && 7039 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7040 } 7041 7042 ASTContext::InlineVariableDefinitionKind 7043 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7044 if (!VD->isInline()) 7045 return InlineVariableDefinitionKind::None; 7046 7047 // In almost all cases, it's a weak definition. 
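  // Illustrative (C++17, hypothetical declarations): a namespace-scope
  // 'inline int Counter = 0;' is a Weak definition, while an in-class
  // 'static constexpr int N = 4;' that is redeclared at file scope without
  // 'inline' gets a Strong definition from that redeclaration.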
7048 auto *First = VD->getFirstDecl(); 7049 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7050 return InlineVariableDefinitionKind::Weak; 7051 7052 // If there's a file-context declaration in this translation unit, it's a 7053 // non-discardable definition. 7054 for (auto *D : VD->redecls()) 7055 if (D->getLexicalDeclContext()->isFileContext() && 7056 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7057 return InlineVariableDefinitionKind::Strong; 7058 7059 // If we've not seen one yet, we don't know. 7060 return InlineVariableDefinitionKind::WeakUnknown; 7061 } 7062 7063 static std::string charUnitsToString(const CharUnits &CU) { 7064 return llvm::itostr(CU.getQuantity()); 7065 } 7066 7067 /// getObjCEncodingForBlock - Return the encoded type for this block 7068 /// declaration. 7069 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7070 std::string S; 7071 7072 const BlockDecl *Decl = Expr->getBlockDecl(); 7073 QualType BlockTy = 7074 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7075 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7076 // Encode result type. 7077 if (getLangOpts().EncodeExtendedBlockSig) 7078 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7079 true /*Extended*/); 7080 else 7081 getObjCEncodingForType(BlockReturnTy, S); 7082 // Compute size of all parameters. 7083 // Start with computing size of a pointer in number of bytes. 7084 // FIXME: There might(should) be a better way of doing this computation! 7085 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7086 CharUnits ParmOffset = PtrSize; 7087 for (auto PI : Decl->parameters()) { 7088 QualType PType = PI->getType(); 7089 CharUnits sz = getObjCEncodingTypeSize(PType); 7090 if (sz.isZero()) 7091 continue; 7092 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7093 ParmOffset += sz; 7094 } 7095 // Size of the argument frame 7096 S += charUnitsToString(ParmOffset); 7097 // Block pointer and offset. 7098 S += "@?0"; 7099 7100 // Argument types. 7101 ParmOffset = PtrSize; 7102 for (auto PVDecl : Decl->parameters()) { 7103 QualType PType = PVDecl->getOriginalType(); 7104 if (const auto *AT = 7105 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7106 // Use array's original type only if it has known number of 7107 // elements. 7108 if (!isa<ConstantArrayType>(AT)) 7109 PType = PVDecl->getType(); 7110 } else if (PType->isFunctionType()) 7111 PType = PVDecl->getType(); 7112 if (getLangOpts().EncodeExtendedBlockSig) 7113 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7114 S, true /*Extended*/); 7115 else 7116 getObjCEncodingForType(PType, S); 7117 S += charUnitsToString(ParmOffset); 7118 ParmOffset += getObjCEncodingTypeSize(PType); 7119 } 7120 7121 return S; 7122 } 7123 7124 std::string 7125 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7126 std::string S; 7127 // Encode result type. 7128 getObjCEncodingForType(Decl->getReturnType(), S); 7129 CharUnits ParmOffset; 7130 // Compute size of all parameters. 7131 for (auto PI : Decl->parameters()) { 7132 QualType PType = PI->getType(); 7133 CharUnits sz = getObjCEncodingTypeSize(PType); 7134 if (sz.isZero()) 7135 continue; 7136 7137 assert(sz.isPositive() && 7138 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7139 ParmOffset += sz; 7140 } 7141 S += charUnitsToString(ParmOffset); 7142 ParmOffset = CharUnits::Zero(); 7143 7144 // Argument types. 
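  // (Illustrative: for 'int f(float x)' the complete string built by this
  // function is expected to be "i4f0" -- return type, total parameter size
  // in bytes, then each parameter's type followed by its byte offset.)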
7145 for (auto PVDecl : Decl->parameters()) { 7146 QualType PType = PVDecl->getOriginalType(); 7147 if (const auto *AT = 7148 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7149 // Use array's original type only if it has known number of 7150 // elements. 7151 if (!isa<ConstantArrayType>(AT)) 7152 PType = PVDecl->getType(); 7153 } else if (PType->isFunctionType()) 7154 PType = PVDecl->getType(); 7155 getObjCEncodingForType(PType, S); 7156 S += charUnitsToString(ParmOffset); 7157 ParmOffset += getObjCEncodingTypeSize(PType); 7158 } 7159 7160 return S; 7161 } 7162 7163 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7164 /// method parameter or return type. If Extended, include class names and 7165 /// block object types. 7166 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7167 QualType T, std::string& S, 7168 bool Extended) const { 7169 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7170 getObjCEncodingForTypeQualifier(QT, S); 7171 // Encode parameter type. 7172 ObjCEncOptions Options = ObjCEncOptions() 7173 .setExpandPointedToStructures() 7174 .setExpandStructures() 7175 .setIsOutermostType(); 7176 if (Extended) 7177 Options.setEncodeBlockParameters().setEncodeClassNames(); 7178 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7179 } 7180 7181 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7182 /// declaration. 7183 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7184 bool Extended) const { 7185 // FIXME: This is not very efficient. 7186 // Encode return type. 7187 std::string S; 7188 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7189 Decl->getReturnType(), S, Extended); 7190 // Compute size of all parameters. 7191 // Start with computing size of a pointer in number of bytes. 7192 // FIXME: There might(should) be a better way of doing this computation! 7193 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7194 // The first two arguments (self and _cmd) are pointers; account for 7195 // their size. 7196 CharUnits ParmOffset = 2 * PtrSize; 7197 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7198 E = Decl->sel_param_end(); PI != E; ++PI) { 7199 QualType PType = (*PI)->getType(); 7200 CharUnits sz = getObjCEncodingTypeSize(PType); 7201 if (sz.isZero()) 7202 continue; 7203 7204 assert(sz.isPositive() && 7205 "getObjCEncodingForMethodDecl - Incomplete param type"); 7206 ParmOffset += sz; 7207 } 7208 S += charUnitsToString(ParmOffset); 7209 S += "@0:"; 7210 S += charUnitsToString(PtrSize); 7211 7212 // Argument types. 7213 ParmOffset = 2 * PtrSize; 7214 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7215 E = Decl->sel_param_end(); PI != E; ++PI) { 7216 const ParmVarDecl *PVDecl = *PI; 7217 QualType PType = PVDecl->getOriginalType(); 7218 if (const auto *AT = 7219 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7220 // Use array's original type only if it has known number of 7221 // elements. 
7222 if (!isa<ConstantArrayType>(AT)) 7223 PType = PVDecl->getType(); 7224 } else if (PType->isFunctionType()) 7225 PType = PVDecl->getType(); 7226 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7227 PType, S, Extended); 7228 S += charUnitsToString(ParmOffset); 7229 ParmOffset += getObjCEncodingTypeSize(PType); 7230 } 7231 7232 return S; 7233 } 7234 7235 ObjCPropertyImplDecl * 7236 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7237 const ObjCPropertyDecl *PD, 7238 const Decl *Container) const { 7239 if (!Container) 7240 return nullptr; 7241 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7242 for (auto *PID : CID->property_impls()) 7243 if (PID->getPropertyDecl() == PD) 7244 return PID; 7245 } else { 7246 const auto *OID = cast<ObjCImplementationDecl>(Container); 7247 for (auto *PID : OID->property_impls()) 7248 if (PID->getPropertyDecl() == PD) 7249 return PID; 7250 } 7251 return nullptr; 7252 } 7253 7254 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7255 /// property declaration. If non-NULL, Container must be either an 7256 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7257 /// NULL when getting encodings for protocol properties. 7258 /// Property attributes are stored as a comma-delimited C string. The simple 7259 /// attributes readonly and bycopy are encoded as single characters. The 7260 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7261 /// encoded as single characters, followed by an identifier. Property types 7262 /// are also encoded as a parametrized attribute. The characters used to encode 7263 /// these attributes are defined by the following enumeration: 7264 /// @code 7265 /// enum PropertyAttributes { 7266 /// kPropertyReadOnly = 'R', // property is read-only. 7267 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7268 /// kPropertyByref = '&', // property is a reference to the value last assigned 7269 /// kPropertyDynamic = 'D', // property is dynamic 7270 /// kPropertyGetter = 'G', // followed by getter selector name 7271 /// kPropertySetter = 'S', // followed by setter selector name 7272 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7273 /// kPropertyType = 'T' // followed by old-style type encoding. 7274 /// kPropertyWeak = 'W' // 'weak' property 7275 /// kPropertyStrong = 'P' // property GC'able 7276 /// kPropertyNonAtomic = 'N' // property non-atomic 7277 /// }; 7278 /// @endcode 7279 std::string 7280 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7281 const Decl *Container) const { 7282 // Collect information from the property implementation decl(s). 7283 bool Dynamic = false; 7284 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7285 7286 if (ObjCPropertyImplDecl *PropertyImpDecl = 7287 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7288 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7289 Dynamic = true; 7290 else 7291 SynthesizePID = PropertyImpDecl; 7292 } 7293 7294 // FIXME: This is not very efficient. 7295 std::string S = "T"; 7296 7297 // Encode result type. 7298 // GCC has some special rules regarding encoding of properties which 7299 // closely resembles encoding of ivars. 
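  // Illustrative (hypothetical property): '@property (nonatomic, copy)
  // NSString *name;' synthesized onto the ivar '_name' is encoded roughly as
  // "T@"NSString",C,N,V_name".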
7300 getObjCEncodingForPropertyType(PD->getType(), S); 7301 7302 if (PD->isReadOnly()) { 7303 S += ",R"; 7304 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7305 S += ",C"; 7306 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7307 S += ",&"; 7308 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7309 S += ",W"; 7310 } else { 7311 switch (PD->getSetterKind()) { 7312 case ObjCPropertyDecl::Assign: break; 7313 case ObjCPropertyDecl::Copy: S += ",C"; break; 7314 case ObjCPropertyDecl::Retain: S += ",&"; break; 7315 case ObjCPropertyDecl::Weak: S += ",W"; break; 7316 } 7317 } 7318 7319 // It really isn't clear at all what this means, since properties 7320 // are "dynamic by default". 7321 if (Dynamic) 7322 S += ",D"; 7323 7324 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7325 S += ",N"; 7326 7327 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7328 S += ",G"; 7329 S += PD->getGetterName().getAsString(); 7330 } 7331 7332 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7333 S += ",S"; 7334 S += PD->getSetterName().getAsString(); 7335 } 7336 7337 if (SynthesizePID) { 7338 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7339 S += ",V"; 7340 S += OID->getNameAsString(); 7341 } 7342 7343 // FIXME: OBJCGC: weak & strong 7344 return S; 7345 } 7346 7347 /// getLegacyIntegralTypeEncoding - 7348 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7349 /// 'l' or 'L' , but not always. For typedefs, we need to use 7350 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7351 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7352 if (isa<TypedefType>(PointeeTy.getTypePtr())) { 7353 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7354 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7355 PointeeTy = UnsignedIntTy; 7356 else 7357 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7358 PointeeTy = IntTy; 7359 } 7360 } 7361 } 7362 7363 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7364 const FieldDecl *Field, 7365 QualType *NotEncodedT) const { 7366 // We follow the behavior of gcc, expanding structures which are 7367 // directly pointed to, and expanding embedded structures. Note that 7368 // these rules are sufficient to prevent recursive encoding of the 7369 // same type. 7370 getObjCEncodingForTypeImpl(T, S, 7371 ObjCEncOptions() 7372 .setExpandPointedToStructures() 7373 .setExpandStructures() 7374 .setIsOutermostType(), 7375 Field, NotEncodedT); 7376 } 7377 7378 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7379 std::string& S) const { 7380 // Encode result type. 7381 // GCC has some special rules regarding encoding of properties which 7382 // closely resembles encoding of ivars. 
7383 getObjCEncodingForTypeImpl(T, S, 7384 ObjCEncOptions() 7385 .setExpandPointedToStructures() 7386 .setExpandStructures() 7387 .setIsOutermostType() 7388 .setEncodingProperty(), 7389 /*Field=*/nullptr); 7390 } 7391 7392 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7393 const BuiltinType *BT) { 7394 BuiltinType::Kind kind = BT->getKind(); 7395 switch (kind) { 7396 case BuiltinType::Void: return 'v'; 7397 case BuiltinType::Bool: return 'B'; 7398 case BuiltinType::Char8: 7399 case BuiltinType::Char_U: 7400 case BuiltinType::UChar: return 'C'; 7401 case BuiltinType::Char16: 7402 case BuiltinType::UShort: return 'S'; 7403 case BuiltinType::Char32: 7404 case BuiltinType::UInt: return 'I'; 7405 case BuiltinType::ULong: 7406 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7407 case BuiltinType::UInt128: return 'T'; 7408 case BuiltinType::ULongLong: return 'Q'; 7409 case BuiltinType::Char_S: 7410 case BuiltinType::SChar: return 'c'; 7411 case BuiltinType::Short: return 's'; 7412 case BuiltinType::WChar_S: 7413 case BuiltinType::WChar_U: 7414 case BuiltinType::Int: return 'i'; 7415 case BuiltinType::Long: 7416 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7417 case BuiltinType::LongLong: return 'q'; 7418 case BuiltinType::Int128: return 't'; 7419 case BuiltinType::Float: return 'f'; 7420 case BuiltinType::Double: return 'd'; 7421 case BuiltinType::LongDouble: return 'D'; 7422 case BuiltinType::NullPtr: return '*'; // like char* 7423 7424 case BuiltinType::BFloat16: 7425 case BuiltinType::Float16: 7426 case BuiltinType::Float128: 7427 case BuiltinType::Ibm128: 7428 case BuiltinType::Half: 7429 case BuiltinType::ShortAccum: 7430 case BuiltinType::Accum: 7431 case BuiltinType::LongAccum: 7432 case BuiltinType::UShortAccum: 7433 case BuiltinType::UAccum: 7434 case BuiltinType::ULongAccum: 7435 case BuiltinType::ShortFract: 7436 case BuiltinType::Fract: 7437 case BuiltinType::LongFract: 7438 case BuiltinType::UShortFract: 7439 case BuiltinType::UFract: 7440 case BuiltinType::ULongFract: 7441 case BuiltinType::SatShortAccum: 7442 case BuiltinType::SatAccum: 7443 case BuiltinType::SatLongAccum: 7444 case BuiltinType::SatUShortAccum: 7445 case BuiltinType::SatUAccum: 7446 case BuiltinType::SatULongAccum: 7447 case BuiltinType::SatShortFract: 7448 case BuiltinType::SatFract: 7449 case BuiltinType::SatLongFract: 7450 case BuiltinType::SatUShortFract: 7451 case BuiltinType::SatUFract: 7452 case BuiltinType::SatULongFract: 7453 // FIXME: potentially need @encodes for these! 7454 return ' '; 7455 7456 #define SVE_TYPE(Name, Id, SingletonId) \ 7457 case BuiltinType::Id: 7458 #include "clang/Basic/AArch64SVEACLETypes.def" 7459 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 7460 #include "clang/Basic/RISCVVTypes.def" 7461 { 7462 DiagnosticsEngine &Diags = C->getDiagnostics(); 7463 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 7464 "cannot yet @encode type %0"); 7465 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 7466 return ' '; 7467 } 7468 7469 case BuiltinType::ObjCId: 7470 case BuiltinType::ObjCClass: 7471 case BuiltinType::ObjCSel: 7472 llvm_unreachable("@encoding ObjC primitive type"); 7473 7474 // OpenCL and placeholder types don't need @encodings. 
7475 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 7476 case BuiltinType::Id: 7477 #include "clang/Basic/OpenCLImageTypes.def" 7478 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 7479 case BuiltinType::Id: 7480 #include "clang/Basic/OpenCLExtensionTypes.def" 7481 case BuiltinType::OCLEvent: 7482 case BuiltinType::OCLClkEvent: 7483 case BuiltinType::OCLQueue: 7484 case BuiltinType::OCLReserveID: 7485 case BuiltinType::OCLSampler: 7486 case BuiltinType::Dependent: 7487 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 7488 case BuiltinType::Id: 7489 #include "clang/Basic/PPCTypes.def" 7490 #define BUILTIN_TYPE(KIND, ID) 7491 #define PLACEHOLDER_TYPE(KIND, ID) \ 7492 case BuiltinType::KIND: 7493 #include "clang/AST/BuiltinTypes.def" 7494 llvm_unreachable("invalid builtin type for @encode"); 7495 } 7496 llvm_unreachable("invalid BuiltinType::Kind value"); 7497 } 7498 7499 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) { 7500 EnumDecl *Enum = ET->getDecl(); 7501 7502 // The encoding of an non-fixed enum type is always 'i', regardless of size. 7503 if (!Enum->isFixed()) 7504 return 'i'; 7505 7506 // The encoding of a fixed enum type matches its fixed underlying type. 7507 const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>(); 7508 return getObjCEncodingForPrimitiveType(C, BT); 7509 } 7510 7511 static void EncodeBitField(const ASTContext *Ctx, std::string& S, 7512 QualType T, const FieldDecl *FD) { 7513 assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl"); 7514 S += 'b'; 7515 // The NeXT runtime encodes bit fields as b followed by the number of bits. 7516 // The GNU runtime requires more information; bitfields are encoded as b, 7517 // then the offset (in bits) of the first element, then the type of the 7518 // bitfield, then the size in bits. For example, in this structure: 7519 // 7520 // struct 7521 // { 7522 // int integer; 7523 // int flags:2; 7524 // }; 7525 // On a 32-bit system, the encoding for flags would be b2 for the NeXT 7526 // runtime, but b32i2 for the GNU runtime. The reason for this extra 7527 // information is not especially sensible, but we're stuck with it for 7528 // compatibility with GCC, although providing it breaks anything that 7529 // actually uses runtime introspection and wants to work on both runtimes... 7530 if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) { 7531 uint64_t Offset; 7532 7533 if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) { 7534 Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr, 7535 IVD); 7536 } else { 7537 const RecordDecl *RD = FD->getParent(); 7538 const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD); 7539 Offset = RL.getFieldOffset(FD->getFieldIndex()); 7540 } 7541 7542 S += llvm::utostr(Offset); 7543 7544 if (const auto *ET = T->getAs<EnumType>()) 7545 S += ObjCEncodingForEnumType(Ctx, ET); 7546 else { 7547 const auto *BT = T->castAs<BuiltinType>(); 7548 S += getObjCEncodingForPrimitiveType(Ctx, BT); 7549 } 7550 } 7551 S += llvm::utostr(FD->getBitWidthValue(*Ctx)); 7552 } 7553 7554 // Helper function for determining whether the encoded type string would include 7555 // a template specialization type. 
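// For example (illustrative): with 'struct S { std::vector<int> V; };',
// expanding the pointee while encoding 'S *' finds a field whose type is a
// class template specialization, so the caller below emits "^v" for the
// pointer instead of expanding the structure.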
7556 static bool hasTemplateSpecializationInEncodedString(const Type *T, 7557 bool VisitBasesAndFields) { 7558 T = T->getBaseElementTypeUnsafe(); 7559 7560 if (auto *PT = T->getAs<PointerType>()) 7561 return hasTemplateSpecializationInEncodedString( 7562 PT->getPointeeType().getTypePtr(), false); 7563 7564 auto *CXXRD = T->getAsCXXRecordDecl(); 7565 7566 if (!CXXRD) 7567 return false; 7568 7569 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 7570 return true; 7571 7572 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 7573 return false; 7574 7575 for (auto B : CXXRD->bases()) 7576 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 7577 true)) 7578 return true; 7579 7580 for (auto *FD : CXXRD->fields()) 7581 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 7582 true)) 7583 return true; 7584 7585 return false; 7586 } 7587 7588 // FIXME: Use SmallString for accumulating string. 7589 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 7590 const ObjCEncOptions Options, 7591 const FieldDecl *FD, 7592 QualType *NotEncodedT) const { 7593 CanQualType CT = getCanonicalType(T); 7594 switch (CT->getTypeClass()) { 7595 case Type::Builtin: 7596 case Type::Enum: 7597 if (FD && FD->isBitField()) 7598 return EncodeBitField(this, S, T, FD); 7599 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 7600 S += getObjCEncodingForPrimitiveType(this, BT); 7601 else 7602 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 7603 return; 7604 7605 case Type::Complex: 7606 S += 'j'; 7607 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 7608 ObjCEncOptions(), 7609 /*Field=*/nullptr); 7610 return; 7611 7612 case Type::Atomic: 7613 S += 'A'; 7614 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 7615 ObjCEncOptions(), 7616 /*Field=*/nullptr); 7617 return; 7618 7619 // encoding for pointer or reference types. 7620 case Type::Pointer: 7621 case Type::LValueReference: 7622 case Type::RValueReference: { 7623 QualType PointeeTy; 7624 if (isa<PointerType>(CT)) { 7625 const auto *PT = T->castAs<PointerType>(); 7626 if (PT->isObjCSelType()) { 7627 S += ':'; 7628 return; 7629 } 7630 PointeeTy = PT->getPointeeType(); 7631 } else { 7632 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 7633 } 7634 7635 bool isReadOnly = false; 7636 // For historical/compatibility reasons, the read-only qualifier of the 7637 // pointee gets emitted _before_ the '^'. The read-only qualifier of 7638 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 7639 // Also, do not emit the 'r' for anything but the outermost type! 7640 if (isa<TypedefType>(T.getTypePtr())) { 7641 if (Options.IsOutermostType() && T.isConstQualified()) { 7642 isReadOnly = true; 7643 S += 'r'; 7644 } 7645 } else if (Options.IsOutermostType()) { 7646 QualType P = PointeeTy; 7647 while (auto PT = P->getAs<PointerType>()) 7648 P = PT->getPointeeType(); 7649 if (P.isConstQualified()) { 7650 isReadOnly = true; 7651 S += 'r'; 7652 } 7653 } 7654 if (isReadOnly) { 7655 // Another legacy compatibility encoding. Some ObjC qualifier and type 7656 // combinations need to be rearranged. 7657 // Rewrite "in const" from "nr" to "rn" 7658 if (StringRef(S).endswith("nr")) 7659 S.replace(S.end()-2, S.end(), "rn"); 7660 } 7661 7662 if (PointeeTy->isCharType()) { 7663 // char pointer types should be encoded as '*' unless it is a 7664 // type that has been typedef'd to 'BOOL'. 
7665 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 7666 S += '*'; 7667 return; 7668 } 7669 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 7670 // GCC binary compat: Need to convert "struct objc_class *" to "#". 7671 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 7672 S += '#'; 7673 return; 7674 } 7675 // GCC binary compat: Need to convert "struct objc_object *" to "@". 7676 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 7677 S += '@'; 7678 return; 7679 } 7680 // If the encoded string for the class includes template names, just emit 7681 // "^v" for pointers to the class. 7682 if (getLangOpts().CPlusPlus && 7683 (!getLangOpts().EncodeCXXClassTemplateSpec && 7684 hasTemplateSpecializationInEncodedString( 7685 RTy, Options.ExpandPointedToStructures()))) { 7686 S += "^v"; 7687 return; 7688 } 7689 // fall through... 7690 } 7691 S += '^'; 7692 getLegacyIntegralTypeEncoding(PointeeTy); 7693 7694 ObjCEncOptions NewOptions; 7695 if (Options.ExpandPointedToStructures()) 7696 NewOptions.setExpandStructures(); 7697 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 7698 /*Field=*/nullptr, NotEncodedT); 7699 return; 7700 } 7701 7702 case Type::ConstantArray: 7703 case Type::IncompleteArray: 7704 case Type::VariableArray: { 7705 const auto *AT = cast<ArrayType>(CT); 7706 7707 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 7708 // Incomplete arrays are encoded as a pointer to the array element. 7709 S += '^'; 7710 7711 getObjCEncodingForTypeImpl( 7712 AT->getElementType(), S, 7713 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 7714 } else { 7715 S += '['; 7716 7717 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 7718 S += llvm::utostr(CAT->getSize().getZExtValue()); 7719 else { 7720 //Variable length arrays are encoded as a regular array with 0 elements. 7721 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 7722 "Unknown array type!"); 7723 S += '0'; 7724 } 7725 7726 getObjCEncodingForTypeImpl( 7727 AT->getElementType(), S, 7728 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 7729 NotEncodedT); 7730 S += ']'; 7731 } 7732 return; 7733 } 7734 7735 case Type::FunctionNoProto: 7736 case Type::FunctionProto: 7737 S += '?'; 7738 return; 7739 7740 case Type::Record: { 7741 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 7742 S += RDecl->isUnion() ? '(' : '{'; 7743 // Anonymous structures print as '?' 7744 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 7745 S += II->getName(); 7746 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 7747 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 7748 llvm::raw_string_ostream OS(S); 7749 printTemplateArgumentList(OS, TemplateArgs.asArray(), 7750 getPrintingPolicy()); 7751 } 7752 } else { 7753 S += '?'; 7754 } 7755 if (Options.ExpandStructures()) { 7756 S += '='; 7757 if (!RDecl->isUnion()) { 7758 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 7759 } else { 7760 for (const auto *Field : RDecl->fields()) { 7761 if (FD) { 7762 S += '"'; 7763 S += Field->getNameAsString(); 7764 S += '"'; 7765 } 7766 7767 // Special case bit-fields. 
7768 if (Field->isBitField()) { 7769 getObjCEncodingForTypeImpl(Field->getType(), S, 7770 ObjCEncOptions().setExpandStructures(), 7771 Field); 7772 } else { 7773 QualType qt = Field->getType(); 7774 getLegacyIntegralTypeEncoding(qt); 7775 getObjCEncodingForTypeImpl( 7776 qt, S, 7777 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 7778 NotEncodedT); 7779 } 7780 } 7781 } 7782 } 7783 S += RDecl->isUnion() ? ')' : '}'; 7784 return; 7785 } 7786 7787 case Type::BlockPointer: { 7788 const auto *BT = T->castAs<BlockPointerType>(); 7789 S += "@?"; // Unlike a pointer-to-function, which is "^?". 7790 if (Options.EncodeBlockParameters()) { 7791 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 7792 7793 S += '<'; 7794 // Block return type 7795 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 7796 Options.forComponentType(), FD, NotEncodedT); 7797 // Block self 7798 S += "@?"; 7799 // Block parameters 7800 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 7801 for (const auto &I : FPT->param_types()) 7802 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 7803 NotEncodedT); 7804 } 7805 S += '>'; 7806 } 7807 return; 7808 } 7809 7810 case Type::ObjCObject: { 7811 // hack to match legacy encoding of *id and *Class 7812 QualType Ty = getObjCObjectPointerType(CT); 7813 if (Ty->isObjCIdType()) { 7814 S += "{objc_object=}"; 7815 return; 7816 } 7817 else if (Ty->isObjCClassType()) { 7818 S += "{objc_class=}"; 7819 return; 7820 } 7821 // TODO: Double check to make sure this intentionally falls through. 7822 LLVM_FALLTHROUGH; 7823 } 7824 7825 case Type::ObjCInterface: { 7826 // Ignore protocol qualifiers when mangling at this level. 7827 // @encode(class_name) 7828 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 7829 S += '{'; 7830 S += OI->getObjCRuntimeNameAsString(); 7831 if (Options.ExpandStructures()) { 7832 S += '='; 7833 SmallVector<const ObjCIvarDecl*, 32> Ivars; 7834 DeepCollectObjCIvars(OI, true, Ivars); 7835 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 7836 const FieldDecl *Field = Ivars[i]; 7837 if (Field->isBitField()) 7838 getObjCEncodingForTypeImpl(Field->getType(), S, 7839 ObjCEncOptions().setExpandStructures(), 7840 Field); 7841 else 7842 getObjCEncodingForTypeImpl(Field->getType(), S, 7843 ObjCEncOptions().setExpandStructures(), FD, 7844 NotEncodedT); 7845 } 7846 } 7847 S += '}'; 7848 return; 7849 } 7850 7851 case Type::ObjCObjectPointer: { 7852 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 7853 if (OPT->isObjCIdType()) { 7854 S += '@'; 7855 return; 7856 } 7857 7858 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 7859 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 7860 // Since this is a binary compatibility issue, need to consult with 7861 // runtime folks. Fortunately, this is a *very* obscure construct. 7862 S += '#'; 7863 return; 7864 } 7865 7866 if (OPT->isObjCQualifiedIdType()) { 7867 getObjCEncodingForTypeImpl( 7868 getObjCIdType(), S, 7869 Options.keepingOnly(ObjCEncOptions() 7870 .setExpandPointedToStructures() 7871 .setExpandStructures()), 7872 FD); 7873 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 7874 // Note that we do extended encoding of protocol qualifier list 7875 // Only when doing ivar or property encoding. 
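// Illustrative example (assumed typical output, not exhaustive): for an ivar
// declared as 'id<NSCopying> obj;', the extended encoding produced here is
// expected to look roughly like @"<NSCopying>", i.e. the '@' emitted above
// followed by the quoted protocol list.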
7876 S += '"'; 7877 for (const auto *I : OPT->quals()) { 7878 S += '<'; 7879 S += I->getObjCRuntimeNameAsString(); 7880 S += '>'; 7881 } 7882 S += '"'; 7883 } 7884 return; 7885 } 7886 7887 S += '@'; 7888 if (OPT->getInterfaceDecl() && 7889 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 7890 S += '"'; 7891 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 7892 for (const auto *I : OPT->quals()) { 7893 S += '<'; 7894 S += I->getObjCRuntimeNameAsString(); 7895 S += '>'; 7896 } 7897 S += '"'; 7898 } 7899 return; 7900 } 7901 7902 // gcc just blithely ignores member pointers. 7903 // FIXME: we should do better than that. 'M' is available. 7904 case Type::MemberPointer: 7905 // This matches gcc's encoding, even though technically it is insufficient. 7906 //FIXME. We should do a better job than gcc. 7907 case Type::Vector: 7908 case Type::ExtVector: 7909 // Until we have a coherent encoding of these three types, issue warning. 7910 if (NotEncodedT) 7911 *NotEncodedT = T; 7912 return; 7913 7914 case Type::ConstantMatrix: 7915 if (NotEncodedT) 7916 *NotEncodedT = T; 7917 return; 7918 7919 // We could see an undeduced auto type here during error recovery. 7920 // Just ignore it. 7921 case Type::Auto: 7922 case Type::DeducedTemplateSpecialization: 7923 return; 7924 7925 case Type::Pipe: 7926 case Type::BitInt: 7927 #define ABSTRACT_TYPE(KIND, BASE) 7928 #define TYPE(KIND, BASE) 7929 #define DEPENDENT_TYPE(KIND, BASE) \ 7930 case Type::KIND: 7931 #define NON_CANONICAL_TYPE(KIND, BASE) \ 7932 case Type::KIND: 7933 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 7934 case Type::KIND: 7935 #include "clang/AST/TypeNodes.inc" 7936 llvm_unreachable("@encode for dependent type!"); 7937 } 7938 llvm_unreachable("bad type kind!"); 7939 } 7940 7941 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 7942 std::string &S, 7943 const FieldDecl *FD, 7944 bool includeVBases, 7945 QualType *NotEncodedT) const { 7946 assert(RDecl && "Expected non-null RecordDecl"); 7947 assert(!RDecl->isUnion() && "Should not be called for unions"); 7948 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 7949 return; 7950 7951 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 7952 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 7953 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 7954 7955 if (CXXRec) { 7956 for (const auto &BI : CXXRec->bases()) { 7957 if (!BI.isVirtual()) { 7958 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 7959 if (base->isEmpty()) 7960 continue; 7961 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 7962 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 7963 std::make_pair(offs, base)); 7964 } 7965 } 7966 } 7967 7968 unsigned i = 0; 7969 for (FieldDecl *Field : RDecl->fields()) { 7970 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 7971 continue; 7972 uint64_t offs = layout.getFieldOffset(i); 7973 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 7974 std::make_pair(offs, Field)); 7975 ++i; 7976 } 7977 7978 if (CXXRec && includeVBases) { 7979 for (const auto &BI : CXXRec->vbases()) { 7980 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 7981 if (base->isEmpty()) 7982 continue; 7983 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 7984 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 7985 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 7986 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 7987 
std::make_pair(offs, base)); 7988 } 7989 } 7990 7991 CharUnits size; 7992 if (CXXRec) { 7993 size = includeVBases ? layout.getSize() : layout.getNonVirtualSize(); 7994 } else { 7995 size = layout.getSize(); 7996 } 7997 7998 #ifndef NDEBUG 7999 uint64_t CurOffs = 0; 8000 #endif 8001 std::multimap<uint64_t, NamedDecl *>::iterator 8002 CurLayObj = FieldOrBaseOffsets.begin(); 8003 8004 if (CXXRec && CXXRec->isDynamicClass() && 8005 (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) { 8006 if (FD) { 8007 S += "\"_vptr$"; 8008 std::string recname = CXXRec->getNameAsString(); 8009 if (recname.empty()) recname = "?"; 8010 S += recname; 8011 S += '"'; 8012 } 8013 S += "^^?"; 8014 #ifndef NDEBUG 8015 CurOffs += getTypeSize(VoidPtrTy); 8016 #endif 8017 } 8018 8019 if (!RDecl->hasFlexibleArrayMember()) { 8020 // Mark the end of the structure. 8021 uint64_t offs = toBits(size); 8022 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 8023 std::make_pair(offs, nullptr)); 8024 } 8025 8026 for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) { 8027 #ifndef NDEBUG 8028 assert(CurOffs <= CurLayObj->first); 8029 if (CurOffs < CurLayObj->first) { 8030 uint64_t padding = CurLayObj->first - CurOffs; 8031 // FIXME: There doesn't seem to be a way to indicate in the encoding that 8032 // packing/alignment of members is different than normal, in which case 8033 // the encoding will be out-of-sync with the real layout. 8034 // If the runtime switches to just consider the size of types without 8035 // taking into account alignment, we could make padding explicit in the 8036 // encoding (e.g. using arrays of chars). The encoding strings would be 8037 // longer then, though. 8038 CurOffs += padding; 8039 } 8040 #endif 8041 8042 NamedDecl *dcl = CurLayObj->second; 8043 if (!dcl) 8044 break; // reached end of structure. 8045 8046 if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) { 8047 // We expand the bases without their virtual bases since those are going 8048 // in the initial structure. Note that this differs from gcc which 8049 // expands virtual bases each time one is encountered in the hierarchy, 8050 // making the encoding type bigger than it really is.
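// Illustrative sketch (hypothetical types, assuming the usual expansion):
// given 'struct B { int i; }; struct D : B { char c; };', the base's fields
// are emitted inline, so @encode(D) comes out roughly as "{D=ic}" rather
// than nesting a separate "{B=i}" record.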
8051 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8052 NotEncodedT); 8053 assert(!base->isEmpty()); 8054 #ifndef NDEBUG 8055 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8056 #endif 8057 } else { 8058 const auto *field = cast<FieldDecl>(dcl); 8059 if (FD) { 8060 S += '"'; 8061 S += field->getNameAsString(); 8062 S += '"'; 8063 } 8064 8065 if (field->isBitField()) { 8066 EncodeBitField(this, S, field->getType(), field); 8067 #ifndef NDEBUG 8068 CurOffs += field->getBitWidthValue(*this); 8069 #endif 8070 } else { 8071 QualType qt = field->getType(); 8072 getLegacyIntegralTypeEncoding(qt); 8073 getObjCEncodingForTypeImpl( 8074 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8075 FD, NotEncodedT); 8076 #ifndef NDEBUG 8077 CurOffs += getTypeSize(field->getType()); 8078 #endif 8079 } 8080 } 8081 } 8082 } 8083 8084 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8085 std::string& S) const { 8086 if (QT & Decl::OBJC_TQ_In) 8087 S += 'n'; 8088 if (QT & Decl::OBJC_TQ_Inout) 8089 S += 'N'; 8090 if (QT & Decl::OBJC_TQ_Out) 8091 S += 'o'; 8092 if (QT & Decl::OBJC_TQ_Bycopy) 8093 S += 'O'; 8094 if (QT & Decl::OBJC_TQ_Byref) 8095 S += 'R'; 8096 if (QT & Decl::OBJC_TQ_Oneway) 8097 S += 'V'; 8098 } 8099 8100 TypedefDecl *ASTContext::getObjCIdDecl() const { 8101 if (!ObjCIdDecl) { 8102 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8103 T = getObjCObjectPointerType(T); 8104 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8105 } 8106 return ObjCIdDecl; 8107 } 8108 8109 TypedefDecl *ASTContext::getObjCSelDecl() const { 8110 if (!ObjCSelDecl) { 8111 QualType T = getPointerType(ObjCBuiltinSelTy); 8112 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8113 } 8114 return ObjCSelDecl; 8115 } 8116 8117 TypedefDecl *ASTContext::getObjCClassDecl() const { 8118 if (!ObjCClassDecl) { 8119 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8120 T = getObjCObjectPointerType(T); 8121 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8122 } 8123 return ObjCClassDecl; 8124 } 8125 8126 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8127 if (!ObjCProtocolClassDecl) { 8128 ObjCProtocolClassDecl 8129 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8130 SourceLocation(), 8131 &Idents.get("Protocol"), 8132 /*typeParamList=*/nullptr, 8133 /*PrevDecl=*/nullptr, 8134 SourceLocation(), true); 8135 } 8136 8137 return ObjCProtocolClassDecl; 8138 } 8139 8140 //===----------------------------------------------------------------------===// 8141 // __builtin_va_list Construction Functions 8142 //===----------------------------------------------------------------------===// 8143 8144 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8145 StringRef Name) { 8146 // typedef char* __builtin[_ms]_va_list; 8147 QualType T = Context->getPointerType(Context->CharTy); 8148 return Context->buildImplicitTypedef(T, Name); 8149 } 8150 8151 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8152 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8153 } 8154 8155 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8156 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8157 } 8158 8159 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8160 // typedef void* __builtin_va_list; 8161 QualType T = Context->getPointerType(Context->VoidTy); 8162 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8163 } 8164 8165 static TypedefDecl * 8166 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8167 // struct __va_list 8168 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8169 if (Context->getLangOpts().CPlusPlus) { 8170 // namespace std { struct __va_list { 8171 NamespaceDecl *NS; 8172 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8173 Context->getTranslationUnitDecl(), 8174 /*Inline*/ false, SourceLocation(), 8175 SourceLocation(), &Context->Idents.get("std"), 8176 /*PrevDecl*/ nullptr); 8177 NS->setImplicit(); 8178 VaListTagDecl->setDeclContext(NS); 8179 } 8180 8181 VaListTagDecl->startDefinition(); 8182 8183 const size_t NumFields = 5; 8184 QualType FieldTypes[NumFields]; 8185 const char *FieldNames[NumFields]; 8186 8187 // void *__stack; 8188 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8189 FieldNames[0] = "__stack"; 8190 8191 // void *__gr_top; 8192 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8193 FieldNames[1] = "__gr_top"; 8194 8195 // void *__vr_top; 8196 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8197 FieldNames[2] = "__vr_top"; 8198 8199 // int __gr_offs; 8200 FieldTypes[3] = Context->IntTy; 8201 FieldNames[3] = "__gr_offs"; 8202 8203 // int __vr_offs; 8204 FieldTypes[4] = Context->IntTy; 8205 FieldNames[4] = "__vr_offs"; 8206 8207 // Create fields 8208 for (unsigned i = 0; i < NumFields; ++i) { 8209 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8210 VaListTagDecl, 8211 SourceLocation(), 8212 SourceLocation(), 8213 &Context->Idents.get(FieldNames[i]), 8214 FieldTypes[i], /*TInfo=*/nullptr, 8215 /*BitWidth=*/nullptr, 8216 /*Mutable=*/false, 8217 ICIS_NoInit); 8218 Field->setAccess(AS_public); 8219 VaListTagDecl->addDecl(Field); 8220 } 8221 VaListTagDecl->completeDefinition(); 8222 Context->VaListTagDecl = VaListTagDecl; 8223 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8224 8225 // } __builtin_va_list; 8226 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8227 } 8228 8229 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8230 // typedef struct __va_list_tag { 8231 RecordDecl *VaListTagDecl; 8232 8233 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8234 VaListTagDecl->startDefinition(); 8235 8236 const size_t NumFields = 5; 8237 QualType FieldTypes[NumFields]; 8238 const char *FieldNames[NumFields]; 8239 8240 // unsigned char gpr; 8241 FieldTypes[0] = Context->UnsignedCharTy; 8242 FieldNames[0] = "gpr"; 8243 8244 // unsigned char fpr; 8245 FieldTypes[1] = Context->UnsignedCharTy; 8246 FieldNames[1] = "fpr"; 8247 8248 // unsigned short reserved; 8249 FieldTypes[2] = Context->UnsignedShortTy; 8250 FieldNames[2] = "reserved"; 8251 8252 // void* overflow_arg_area; 8253 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8254 FieldNames[3] = "overflow_arg_area"; 8255 8256 // void* reg_save_area; 8257 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8258 FieldNames[4] = "reg_save_area"; 8259 8260 // Create fields 8261 for (unsigned i = 0; i < NumFields; ++i) { 8262 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8263 SourceLocation(), 8264 SourceLocation(), 8265 &Context->Idents.get(FieldNames[i]), 8266 FieldTypes[i], /*TInfo=*/nullptr, 8267 /*BitWidth=*/nullptr, 8268 /*Mutable=*/false, 8269 ICIS_NoInit); 8270 Field->setAccess(AS_public); 8271 VaListTagDecl->addDecl(Field); 8272 } 8273 VaListTagDecl->completeDefinition(); 8274 
Context->VaListTagDecl = VaListTagDecl; 8275 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8276 8277 // } __va_list_tag; 8278 TypedefDecl *VaListTagTypedefDecl = 8279 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8280 8281 QualType VaListTagTypedefType = 8282 Context->getTypedefType(VaListTagTypedefDecl); 8283 8284 // typedef __va_list_tag __builtin_va_list[1]; 8285 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8286 QualType VaListTagArrayType 8287 = Context->getConstantArrayType(VaListTagTypedefType, 8288 Size, nullptr, ArrayType::Normal, 0); 8289 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8290 } 8291 8292 static TypedefDecl * 8293 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8294 // struct __va_list_tag { 8295 RecordDecl *VaListTagDecl; 8296 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8297 VaListTagDecl->startDefinition(); 8298 8299 const size_t NumFields = 4; 8300 QualType FieldTypes[NumFields]; 8301 const char *FieldNames[NumFields]; 8302 8303 // unsigned gp_offset; 8304 FieldTypes[0] = Context->UnsignedIntTy; 8305 FieldNames[0] = "gp_offset"; 8306 8307 // unsigned fp_offset; 8308 FieldTypes[1] = Context->UnsignedIntTy; 8309 FieldNames[1] = "fp_offset"; 8310 8311 // void* overflow_arg_area; 8312 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8313 FieldNames[2] = "overflow_arg_area"; 8314 8315 // void* reg_save_area; 8316 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8317 FieldNames[3] = "reg_save_area"; 8318 8319 // Create fields 8320 for (unsigned i = 0; i < NumFields; ++i) { 8321 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8322 VaListTagDecl, 8323 SourceLocation(), 8324 SourceLocation(), 8325 &Context->Idents.get(FieldNames[i]), 8326 FieldTypes[i], /*TInfo=*/nullptr, 8327 /*BitWidth=*/nullptr, 8328 /*Mutable=*/false, 8329 ICIS_NoInit); 8330 Field->setAccess(AS_public); 8331 VaListTagDecl->addDecl(Field); 8332 } 8333 VaListTagDecl->completeDefinition(); 8334 Context->VaListTagDecl = VaListTagDecl; 8335 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8336 8337 // }; 8338 8339 // typedef struct __va_list_tag __builtin_va_list[1]; 8340 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8341 QualType VaListTagArrayType = Context->getConstantArrayType( 8342 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8343 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8344 } 8345 8346 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8347 // typedef int __builtin_va_list[4]; 8348 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8349 QualType IntArrayType = Context->getConstantArrayType( 8350 Context->IntTy, Size, nullptr, ArrayType::Normal, 0); 8351 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8352 } 8353 8354 static TypedefDecl * 8355 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8356 // struct __va_list 8357 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8358 if (Context->getLangOpts().CPlusPlus) { 8359 // namespace std { struct __va_list { 8360 NamespaceDecl *NS; 8361 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8362 Context->getTranslationUnitDecl(), 8363 /*Inline*/false, SourceLocation(), 8364 SourceLocation(), &Context->Idents.get("std"), 8365 /*PrevDecl*/ nullptr); 8366 NS->setImplicit(); 8367 
VaListDecl->setDeclContext(NS); 8368 } 8369 8370 VaListDecl->startDefinition(); 8371 8372 // void * __ap; 8373 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8374 VaListDecl, 8375 SourceLocation(), 8376 SourceLocation(), 8377 &Context->Idents.get("__ap"), 8378 Context->getPointerType(Context->VoidTy), 8379 /*TInfo=*/nullptr, 8380 /*BitWidth=*/nullptr, 8381 /*Mutable=*/false, 8382 ICIS_NoInit); 8383 Field->setAccess(AS_public); 8384 VaListDecl->addDecl(Field); 8385 8386 // }; 8387 VaListDecl->completeDefinition(); 8388 Context->VaListTagDecl = VaListDecl; 8389 8390 // typedef struct __va_list __builtin_va_list; 8391 QualType T = Context->getRecordType(VaListDecl); 8392 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8393 } 8394 8395 static TypedefDecl * 8396 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8397 // struct __va_list_tag { 8398 RecordDecl *VaListTagDecl; 8399 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8400 VaListTagDecl->startDefinition(); 8401 8402 const size_t NumFields = 4; 8403 QualType FieldTypes[NumFields]; 8404 const char *FieldNames[NumFields]; 8405 8406 // long __gpr; 8407 FieldTypes[0] = Context->LongTy; 8408 FieldNames[0] = "__gpr"; 8409 8410 // long __fpr; 8411 FieldTypes[1] = Context->LongTy; 8412 FieldNames[1] = "__fpr"; 8413 8414 // void *__overflow_arg_area; 8415 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8416 FieldNames[2] = "__overflow_arg_area"; 8417 8418 // void *__reg_save_area; 8419 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8420 FieldNames[3] = "__reg_save_area"; 8421 8422 // Create fields 8423 for (unsigned i = 0; i < NumFields; ++i) { 8424 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8425 VaListTagDecl, 8426 SourceLocation(), 8427 SourceLocation(), 8428 &Context->Idents.get(FieldNames[i]), 8429 FieldTypes[i], /*TInfo=*/nullptr, 8430 /*BitWidth=*/nullptr, 8431 /*Mutable=*/false, 8432 ICIS_NoInit); 8433 Field->setAccess(AS_public); 8434 VaListTagDecl->addDecl(Field); 8435 } 8436 VaListTagDecl->completeDefinition(); 8437 Context->VaListTagDecl = VaListTagDecl; 8438 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8439 8440 // }; 8441 8442 // typedef __va_list_tag __builtin_va_list[1]; 8443 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8444 QualType VaListTagArrayType = Context->getConstantArrayType( 8445 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8446 8447 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8448 } 8449 8450 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 8451 // typedef struct __va_list_tag { 8452 RecordDecl *VaListTagDecl; 8453 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8454 VaListTagDecl->startDefinition(); 8455 8456 const size_t NumFields = 3; 8457 QualType FieldTypes[NumFields]; 8458 const char *FieldNames[NumFields]; 8459 8460 // void *CurrentSavedRegisterArea; 8461 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8462 FieldNames[0] = "__current_saved_reg_area_pointer"; 8463 8464 // void *SavedRegAreaEnd; 8465 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8466 FieldNames[1] = "__saved_reg_area_end_pointer"; 8467 8468 // void *OverflowArea; 8469 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8470 FieldNames[2] = "__overflow_area_pointer"; 8471 8472 // Create fields 8473 for (unsigned i = 0; i < NumFields; ++i) { 8474 FieldDecl *Field = FieldDecl::Create( 
8475 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 8476 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 8477 /*TInfo=*/0, 8478 /*BitWidth=*/0, 8479 /*Mutable=*/false, ICIS_NoInit); 8480 Field->setAccess(AS_public); 8481 VaListTagDecl->addDecl(Field); 8482 } 8483 VaListTagDecl->completeDefinition(); 8484 Context->VaListTagDecl = VaListTagDecl; 8485 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8486 8487 // } __va_list_tag; 8488 TypedefDecl *VaListTagTypedefDecl = 8489 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8490 8491 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 8492 8493 // typedef __va_list_tag __builtin_va_list[1]; 8494 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8495 QualType VaListTagArrayType = Context->getConstantArrayType( 8496 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); 8497 8498 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8499 } 8500 8501 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 8502 TargetInfo::BuiltinVaListKind Kind) { 8503 switch (Kind) { 8504 case TargetInfo::CharPtrBuiltinVaList: 8505 return CreateCharPtrBuiltinVaListDecl(Context); 8506 case TargetInfo::VoidPtrBuiltinVaList: 8507 return CreateVoidPtrBuiltinVaListDecl(Context); 8508 case TargetInfo::AArch64ABIBuiltinVaList: 8509 return CreateAArch64ABIBuiltinVaListDecl(Context); 8510 case TargetInfo::PowerABIBuiltinVaList: 8511 return CreatePowerABIBuiltinVaListDecl(Context); 8512 case TargetInfo::X86_64ABIBuiltinVaList: 8513 return CreateX86_64ABIBuiltinVaListDecl(Context); 8514 case TargetInfo::PNaClABIBuiltinVaList: 8515 return CreatePNaClABIBuiltinVaListDecl(Context); 8516 case TargetInfo::AAPCSABIBuiltinVaList: 8517 return CreateAAPCSABIBuiltinVaListDecl(Context); 8518 case TargetInfo::SystemZBuiltinVaList: 8519 return CreateSystemZBuiltinVaListDecl(Context); 8520 case TargetInfo::HexagonBuiltinVaList: 8521 return CreateHexagonBuiltinVaListDecl(Context); 8522 } 8523 8524 llvm_unreachable("Unhandled __builtin_va_list type kind"); 8525 } 8526 8527 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 8528 if (!BuiltinVaListDecl) { 8529 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 8530 assert(BuiltinVaListDecl->isImplicit()); 8531 } 8532 8533 return BuiltinVaListDecl; 8534 } 8535 8536 Decl *ASTContext::getVaListTagDecl() const { 8537 // Force the creation of VaListTagDecl by building the __builtin_va_list 8538 // declaration. 8539 if (!VaListTagDecl) 8540 (void)getBuiltinVaListDecl(); 8541 8542 return VaListTagDecl; 8543 } 8544 8545 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 8546 if (!BuiltinMSVaListDecl) 8547 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 8548 8549 return BuiltinMSVaListDecl; 8550 } 8551 8552 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 8553 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 8554 } 8555 8556 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 8557 assert(ObjCConstantStringType.isNull() && 8558 "'NSConstantString' type already set!"); 8559 8560 ObjCConstantStringType = getObjCInterfaceType(Decl); 8561 } 8562 8563 /// Retrieve the template name that corresponds to a non-empty 8564 /// lookup. 
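/// For example (hypothetical overload set): when the name 'f' in a call such
/// as 'f<int>(x)' resolves to several function template declarations, the
/// whole set is packaged into a single overloaded TemplateName.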
8565 TemplateName 8566 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 8567 UnresolvedSetIterator End) const { 8568 unsigned size = End - Begin; 8569 assert(size > 1 && "set is not overloaded!"); 8570 8571 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 8572 size * sizeof(FunctionTemplateDecl*)); 8573 auto *OT = new (memory) OverloadedTemplateStorage(size); 8574 8575 NamedDecl **Storage = OT->getStorage(); 8576 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 8577 NamedDecl *D = *I; 8578 assert(isa<FunctionTemplateDecl>(D) || 8579 isa<UnresolvedUsingValueDecl>(D) || 8580 (isa<UsingShadowDecl>(D) && 8581 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 8582 *Storage++ = D; 8583 } 8584 8585 return TemplateName(OT); 8586 } 8587 8588 /// Retrieve a template name representing an unqualified-id that has been 8589 /// assumed to name a template for ADL purposes. 8590 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 8591 auto *OT = new (*this) AssumedTemplateStorage(Name); 8592 return TemplateName(OT); 8593 } 8594 8595 /// Retrieve the template name that represents a qualified 8596 /// template name such as \c std::vector. 8597 TemplateName 8598 ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 8599 bool TemplateKeyword, 8600 TemplateDecl *Template) const { 8601 assert(NNS && "Missing nested-name-specifier in qualified template name"); 8602 8603 // FIXME: Canonicalization? 8604 llvm::FoldingSetNodeID ID; 8605 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 8606 8607 void *InsertPos = nullptr; 8608 QualifiedTemplateName *QTN = 8609 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8610 if (!QTN) { 8611 QTN = new (*this, alignof(QualifiedTemplateName)) 8612 QualifiedTemplateName(NNS, TemplateKeyword, Template); 8613 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 8614 } 8615 8616 return TemplateName(QTN); 8617 } 8618 8619 /// Retrieve the template name that represents a dependent 8620 /// template name such as \c MetaFun::template apply. 8621 TemplateName 8622 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 8623 const IdentifierInfo *Name) const { 8624 assert((!NNS || NNS->isDependent()) && 8625 "Nested name specifier must be dependent"); 8626 8627 llvm::FoldingSetNodeID ID; 8628 DependentTemplateName::Profile(ID, NNS, Name); 8629 8630 void *InsertPos = nullptr; 8631 DependentTemplateName *QTN = 8632 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8633 8634 if (QTN) 8635 return TemplateName(QTN); 8636 8637 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 8638 if (CanonNNS == NNS) { 8639 QTN = new (*this, alignof(DependentTemplateName)) 8640 DependentTemplateName(NNS, Name); 8641 } else { 8642 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 8643 QTN = new (*this, alignof(DependentTemplateName)) 8644 DependentTemplateName(NNS, Name, Canon); 8645 DependentTemplateName *CheckQTN = 8646 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8647 assert(!CheckQTN && "Dependent type name canonicalization broken"); 8648 (void)CheckQTN; 8649 } 8650 8651 DependentTemplateNames.InsertNode(QTN, InsertPos); 8652 return TemplateName(QTN); 8653 } 8654 8655 /// Retrieve the template name that represents a dependent 8656 /// template name such as \c MetaFun::template operator+. 
8657 TemplateName 8658 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 8659 OverloadedOperatorKind Operator) const { 8660 assert((!NNS || NNS->isDependent()) && 8661 "Nested name specifier must be dependent"); 8662 8663 llvm::FoldingSetNodeID ID; 8664 DependentTemplateName::Profile(ID, NNS, Operator); 8665 8666 void *InsertPos = nullptr; 8667 DependentTemplateName *QTN 8668 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8669 8670 if (QTN) 8671 return TemplateName(QTN); 8672 8673 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 8674 if (CanonNNS == NNS) { 8675 QTN = new (*this, alignof(DependentTemplateName)) 8676 DependentTemplateName(NNS, Operator); 8677 } else { 8678 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 8679 QTN = new (*this, alignof(DependentTemplateName)) 8680 DependentTemplateName(NNS, Operator, Canon); 8681 8682 DependentTemplateName *CheckQTN 8683 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8684 assert(!CheckQTN && "Dependent template name canonicalization broken"); 8685 (void)CheckQTN; 8686 } 8687 8688 DependentTemplateNames.InsertNode(QTN, InsertPos); 8689 return TemplateName(QTN); 8690 } 8691 8692 TemplateName 8693 ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, 8694 TemplateName replacement) const { 8695 llvm::FoldingSetNodeID ID; 8696 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); 8697 8698 void *insertPos = nullptr; 8699 SubstTemplateTemplateParmStorage *subst 8700 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 8701 8702 if (!subst) { 8703 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); 8704 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 8705 } 8706 8707 return TemplateName(subst); 8708 } 8709 8710 TemplateName 8711 ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, 8712 const TemplateArgument &ArgPack) const { 8713 auto &Self = const_cast<ASTContext &>(*this); 8714 llvm::FoldingSetNodeID ID; 8715 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); 8716 8717 void *InsertPos = nullptr; 8718 SubstTemplateTemplateParmPackStorage *Subst 8719 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 8720 8721 if (!Subst) { 8722 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, 8723 ArgPack.pack_size(), 8724 ArgPack.pack_begin()); 8725 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 8726 } 8727 8728 return TemplateName(Subst); 8729 } 8730 8731 /// getFromTargetType - Given one of the integer types provided by 8732 /// TargetInfo, produce the corresponding type. The unsigned @p Type 8733 /// is actually a value of type @c TargetInfo::IntType. 
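/// For example, getFromTargetType(TargetInfo::UnsignedLong) yields
/// UnsignedLongTy, while TargetInfo::NoInt maps to a null CanQualType.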
8734 CanQualType ASTContext::getFromTargetType(unsigned Type) const { 8735 switch (Type) { 8736 case TargetInfo::NoInt: return {}; 8737 case TargetInfo::SignedChar: return SignedCharTy; 8738 case TargetInfo::UnsignedChar: return UnsignedCharTy; 8739 case TargetInfo::SignedShort: return ShortTy; 8740 case TargetInfo::UnsignedShort: return UnsignedShortTy; 8741 case TargetInfo::SignedInt: return IntTy; 8742 case TargetInfo::UnsignedInt: return UnsignedIntTy; 8743 case TargetInfo::SignedLong: return LongTy; 8744 case TargetInfo::UnsignedLong: return UnsignedLongTy; 8745 case TargetInfo::SignedLongLong: return LongLongTy; 8746 case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy; 8747 } 8748 8749 llvm_unreachable("Unhandled TargetInfo::IntType value"); 8750 } 8751 8752 //===----------------------------------------------------------------------===// 8753 // Type Predicates. 8754 //===----------------------------------------------------------------------===// 8755 8756 /// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, the type's 8757 /// Objective-C garbage collection attribute. 8758 /// 8759 Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const { 8760 if (getLangOpts().getGC() == LangOptions::NonGC) 8761 return Qualifiers::GCNone; 8762 8763 assert(getLangOpts().ObjC); 8764 Qualifiers::GC GCAttrs = Ty.getObjCGCAttr(); 8765 8766 // Default behaviour under Objective-C's GC is for ObjC pointers 8767 // (or pointers to them) to be treated as though they were declared 8768 // as __strong. 8769 if (GCAttrs == Qualifiers::GCNone) { 8770 if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) 8771 return Qualifiers::Strong; 8772 else if (Ty->isPointerType()) 8773 return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType()); 8774 } else { 8775 // It's not valid to set GC attributes on anything that isn't a 8776 // pointer. 8777 #ifndef NDEBUG 8778 QualType CT = Ty->getCanonicalTypeInternal(); 8779 while (const auto *AT = dyn_cast<ArrayType>(CT)) 8780 CT = AT->getElementType(); 8781 assert(CT->isAnyPointerType() || CT->isBlockPointerType()); 8782 #endif 8783 } 8784 return GCAttrs; 8785 } 8786 8787 //===----------------------------------------------------------------------===// 8788 // Type Compatibility Testing 8789 //===----------------------------------------------------------------------===// 8790 8791 /// areCompatVectorTypes - Return true if the two specified vector types are 8792 /// compatible. 8793 static bool areCompatVectorTypes(const VectorType *LHS, 8794 const VectorType *RHS) { 8795 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 8796 return LHS->getElementType() == RHS->getElementType() && 8797 LHS->getNumElements() == RHS->getNumElements(); 8798 } 8799 8800 /// areCompatMatrixTypes - Return true if the two specified matrix types are 8801 /// compatible.
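/// For example (illustrative, using the matrix_type extension): two
/// 'float __attribute__((matrix_type(4, 4)))' types are compatible, while a
/// 4x4 and a 4x3 float matrix are not.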
8802 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 8803 const ConstantMatrixType *RHS) { 8804 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 8805 return LHS->getElementType() == RHS->getElementType() && 8806 LHS->getNumRows() == RHS->getNumRows() && 8807 LHS->getNumColumns() == RHS->getNumColumns(); 8808 } 8809 8810 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 8811 QualType SecondVec) { 8812 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 8813 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 8814 8815 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 8816 return true; 8817 8818 // Treat Neon vector types and most AltiVec vector types as if they are the 8819 // equivalent GCC vector types. 8820 const auto *First = FirstVec->castAs<VectorType>(); 8821 const auto *Second = SecondVec->castAs<VectorType>(); 8822 if (First->getNumElements() == Second->getNumElements() && 8823 hasSameType(First->getElementType(), Second->getElementType()) && 8824 First->getVectorKind() != VectorType::AltiVecPixel && 8825 First->getVectorKind() != VectorType::AltiVecBool && 8826 Second->getVectorKind() != VectorType::AltiVecPixel && 8827 Second->getVectorKind() != VectorType::AltiVecBool && 8828 First->getVectorKind() != VectorType::SveFixedLengthDataVector && 8829 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 8830 Second->getVectorKind() != VectorType::SveFixedLengthDataVector && 8831 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) 8832 return true; 8833 8834 return false; 8835 } 8836 8837 /// getSVETypeSize - Return SVE vector or predicate register size. 8838 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 8839 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); 8840 return Ty->getKind() == BuiltinType::SveBool 8841 ? (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth() 8842 : Context.getLangOpts().VScaleMin * 128; 8843 } 8844 8845 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 8846 QualType SecondType) { 8847 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 8848 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 8849 "Expected SVE builtin type and vector type!"); 8850 8851 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 8852 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 8853 if (const auto *VT = SecondType->getAs<VectorType>()) { 8854 // Predicates have the same representation as uint8 so we also have to 8855 // check the kind to make these types incompatible. 
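// For example, a fixed-length predicate vector (arm_sve_vector_bits applied
// to svbool_t) is only accepted against svbool_t here, even though its
// storage looks like a vector of uint8 (illustrative case).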
8856 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 8857 return BT->getKind() == BuiltinType::SveBool; 8858 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 8859 return VT->getElementType().getCanonicalType() == 8860 FirstType->getSveEltType(*this); 8861 else if (VT->getVectorKind() == VectorType::GenericVector) 8862 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 8863 hasSameType(VT->getElementType(), 8864 getBuiltinVectorTypeInfo(BT).ElementType); 8865 } 8866 } 8867 return false; 8868 }; 8869 8870 return IsValidCast(FirstType, SecondType) || 8871 IsValidCast(SecondType, FirstType); 8872 } 8873 8874 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 8875 QualType SecondType) { 8876 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 8877 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 8878 "Expected SVE builtin type and vector type!"); 8879 8880 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 8881 const auto *BT = FirstType->getAs<BuiltinType>(); 8882 if (!BT) 8883 return false; 8884 8885 const auto *VecTy = SecondType->getAs<VectorType>(); 8886 if (VecTy && 8887 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || 8888 VecTy->getVectorKind() == VectorType::GenericVector)) { 8889 const LangOptions::LaxVectorConversionKind LVCKind = 8890 getLangOpts().getLaxVectorConversions(); 8891 8892 // Can not convert between sve predicates and sve vectors because of 8893 // different size. 8894 if (BT->getKind() == BuiltinType::SveBool && 8895 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) 8896 return false; 8897 8898 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 8899 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 8900 // converts to VLAT and VLAT implicitly converts to GNUT." 8901 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 8902 // predicates. 8903 if (VecTy->getVectorKind() == VectorType::GenericVector && 8904 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 8905 return false; 8906 8907 // If -flax-vector-conversions=all is specified, the types are 8908 // certainly compatible. 8909 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 8910 return true; 8911 8912 // If -flax-vector-conversions=integer is specified, the types are 8913 // compatible if the elements are integer types. 8914 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 8915 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 8916 FirstType->getSveEltType(*this)->isIntegerType(); 8917 } 8918 8919 return false; 8920 }; 8921 8922 return IsLaxCompatible(FirstType, SecondType) || 8923 IsLaxCompatible(SecondType, FirstType); 8924 } 8925 8926 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 8927 while (true) { 8928 // __strong id 8929 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 8930 if (Attr->getAttrKind() == attr::ObjCOwnership) 8931 return true; 8932 8933 Ty = Attr->getModifiedType(); 8934 8935 // X *__strong (...) 8936 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 8937 Ty = Paren->getInnerType(); 8938 8939 // We do not want to look through typedefs, typeof(expr), 8940 // typeof(type), or any other way that the type is somehow 8941 // abstracted. 
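// Illustrative case (hypothetical typedef): given
// 'typedef __strong id StrongID;', a use of 'StrongID' is not considered to
// carry a direct ownership qualifier, since the attribute is hidden behind
// the typedef.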
8942 } else { 8943 return false; 8944 } 8945 } 8946 } 8947 8948 //===----------------------------------------------------------------------===// 8949 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 8950 //===----------------------------------------------------------------------===// 8951 8952 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 8953 /// inheritance hierarchy of 'rProto'. 8954 bool 8955 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 8956 ObjCProtocolDecl *rProto) const { 8957 if (declaresSameEntity(lProto, rProto)) 8958 return true; 8959 for (auto *PI : rProto->protocols()) 8960 if (ProtocolCompatibleWithProtocol(lProto, PI)) 8961 return true; 8962 return false; 8963 } 8964 8965 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 8966 /// Class<pr1, ...>. 8967 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 8968 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 8969 for (auto *lhsProto : lhs->quals()) { 8970 bool match = false; 8971 for (auto *rhsProto : rhs->quals()) { 8972 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 8973 match = true; 8974 break; 8975 } 8976 } 8977 if (!match) 8978 return false; 8979 } 8980 return true; 8981 } 8982 8983 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 8984 /// ObjCQualifiedIDType. 8985 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 8986 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 8987 bool compare) { 8988 // Allow id<P..> and an 'id' in all cases. 8989 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 8990 return true; 8991 8992 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 8993 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 8994 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 8995 return false; 8996 8997 if (lhs->isObjCQualifiedIdType()) { 8998 if (rhs->qual_empty()) { 8999 // If the RHS is a unqualified interface pointer "NSString*", 9000 // make sure we check the class hierarchy. 9001 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9002 for (auto *I : lhs->quals()) { 9003 // when comparing an id<P> on lhs with a static type on rhs, 9004 // see if static class implements all of id's protocols, directly or 9005 // through its super class and categories. 9006 if (!rhsID->ClassImplementsProtocol(I, true)) 9007 return false; 9008 } 9009 } 9010 // If there are no qualifiers and no interface, we have an 'id'. 9011 return true; 9012 } 9013 // Both the right and left sides have qualifiers. 9014 for (auto *lhsProto : lhs->quals()) { 9015 bool match = false; 9016 9017 // when comparing an id<P> on lhs with a static type on rhs, 9018 // see if static class implements all of id's protocols, directly or 9019 // through its super class and categories. 9020 for (auto *rhsProto : rhs->quals()) { 9021 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9022 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9023 match = true; 9024 break; 9025 } 9026 } 9027 // If the RHS is a qualified interface pointer "NSString<P>*", 9028 // make sure we check the class hierarchy. 9029 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 9030 for (auto *I : lhs->quals()) { 9031 // when comparing an id<P> on lhs with a static type on rhs, 9032 // see if static class implements all of id's protocols, directly or 9033 // through its super class and categories. 
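// For example (Foundation types used for illustration): assigning a
// 'NSString<NSSecureCoding> *' to 'id<NSCopying>' succeeds only if NSString,
// a superclass, or one of its categories adopts NSCopying.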
9034 if (rhsID->ClassImplementsProtocol(I, true)) { 9035 match = true; 9036 break; 9037 } 9038 } 9039 } 9040 if (!match) 9041 return false; 9042 } 9043 9044 return true; 9045 } 9046 9047 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9048 9049 if (lhs->getInterfaceType()) { 9050 // If both the right and left sides have qualifiers. 9051 for (auto *lhsProto : lhs->quals()) { 9052 bool match = false; 9053 9054 // when comparing an id<P> on rhs with a static type on lhs, 9055 // see if static class implements all of id's protocols, directly or 9056 // through its super class and categories. 9057 // First, lhs protocols in the qualifier list must be found, direct 9058 // or indirect in rhs's qualifier list or it is a mismatch. 9059 for (auto *rhsProto : rhs->quals()) { 9060 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9061 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9062 match = true; 9063 break; 9064 } 9065 } 9066 if (!match) 9067 return false; 9068 } 9069 9070 // Static class's protocols, or its super class or category protocols 9071 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9072 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9073 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9074 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9075 // This is rather dubious but matches gcc's behavior. If lhs has 9076 // no type qualifier and its class has no static protocol(s) 9077 // assume that it is mismatch. 9078 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9079 return false; 9080 for (auto *lhsProto : LHSInheritedProtocols) { 9081 bool match = false; 9082 for (auto *rhsProto : rhs->quals()) { 9083 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9084 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9085 match = true; 9086 break; 9087 } 9088 } 9089 if (!match) 9090 return false; 9091 } 9092 } 9093 return true; 9094 } 9095 return false; 9096 } 9097 9098 /// canAssignObjCInterfaces - Return true if the two interface types are 9099 /// compatible for assignment from RHS to LHS. This handles validation of any 9100 /// protocol qualifiers on the LHS or RHS. 9101 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9102 const ObjCObjectPointerType *RHSOPT) { 9103 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9104 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9105 9106 // If either type represents the built-in 'id' type, return true. 9107 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9108 return true; 9109 9110 // Function object that propagates a successful result or handles 9111 // __kindof types. 9112 auto finish = [&](bool succeeded) -> bool { 9113 if (succeeded) 9114 return true; 9115 9116 if (!RHS->isKindOfType()) 9117 return false; 9118 9119 // Strip off __kindof and protocol qualifiers, then check whether 9120 // we can assign the other way. 9121 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9122 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9123 }; 9124 9125 // Casts from or to id<P> are allowed when the other side has compatible 9126 // protocols. 9127 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9128 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9129 } 9130 9131 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 
9132 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9133 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9134 } 9135 9136 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9137 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9138 return true; 9139 } 9140 9141 // If we have 2 user-defined types, fall into that path. 9142 if (LHS->getInterface() && RHS->getInterface()) { 9143 return finish(canAssignObjCInterfaces(LHS, RHS)); 9144 } 9145 9146 return false; 9147 } 9148 9149 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9150 /// for providing type-safety for objective-c pointers used to pass/return 9151 /// arguments in block literals. When passed as arguments, passing 'A*' where 9152 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9153 /// not OK. For the return type, the opposite is not OK. 9154 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9155 const ObjCObjectPointerType *LHSOPT, 9156 const ObjCObjectPointerType *RHSOPT, 9157 bool BlockReturnType) { 9158 9159 // Function object that propagates a successful result or handles 9160 // __kindof types. 9161 auto finish = [&](bool succeeded) -> bool { 9162 if (succeeded) 9163 return true; 9164 9165 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9166 if (!Expected->isKindOfType()) 9167 return false; 9168 9169 // Strip off __kindof and protocol qualifiers, then check whether 9170 // we can assign the other way. 9171 return canAssignObjCInterfacesInBlockPointer( 9172 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9173 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9174 BlockReturnType); 9175 }; 9176 9177 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9178 return true; 9179 9180 if (LHSOPT->isObjCBuiltinType()) { 9181 return finish(RHSOPT->isObjCBuiltinType() || 9182 RHSOPT->isObjCQualifiedIdType()); 9183 } 9184 9185 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9186 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9187 // Use for block parameters previous type checking for compatibility. 9188 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9189 // Or corrected type checking as in non-compat mode. 9190 (!BlockReturnType && 9191 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9192 else 9193 return finish(ObjCQualifiedIdTypesAreCompatible( 9194 (BlockReturnType ? LHSOPT : RHSOPT), 9195 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9196 } 9197 9198 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9199 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9200 if (LHS && RHS) { // We have 2 user-defined types. 9201 if (LHS != RHS) { 9202 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9203 return finish(BlockReturnType); 9204 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9205 return finish(!BlockReturnType); 9206 } 9207 else 9208 return true; 9209 } 9210 return false; 9211 } 9212 9213 /// Comparison routine for Objective-C protocols to be used with 9214 /// llvm::array_pod_sort. 9215 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9216 ObjCProtocolDecl * const *rhs) { 9217 return (*lhs)->getName().compare((*rhs)->getName()); 9218 } 9219 9220 /// getIntersectionOfProtocols - This routine finds the intersection of set 9221 /// of protocols inherited from two distinct objective-c pointer objects with 9222 /// the given common base. 
9223 /// It is used to build composite qualifier list of the composite type of 9224 /// the conditional expression involving two objective-c pointer objects. 9225 static 9226 void getIntersectionOfProtocols(ASTContext &Context, 9227 const ObjCInterfaceDecl *CommonBase, 9228 const ObjCObjectPointerType *LHSOPT, 9229 const ObjCObjectPointerType *RHSOPT, 9230 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9231 9232 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9233 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9234 assert(LHS->getInterface() && "LHS must have an interface base"); 9235 assert(RHS->getInterface() && "RHS must have an interface base"); 9236 9237 // Add all of the protocols for the LHS. 9238 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9239 9240 // Start with the protocol qualifiers. 9241 for (auto proto : LHS->quals()) { 9242 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9243 } 9244 9245 // Also add the protocols associated with the LHS interface. 9246 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9247 9248 // Add all of the protocols for the RHS. 9249 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9250 9251 // Start with the protocol qualifiers. 9252 for (auto proto : RHS->quals()) { 9253 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9254 } 9255 9256 // Also add the protocols associated with the RHS interface. 9257 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9258 9259 // Compute the intersection of the collected protocol sets. 9260 for (auto proto : LHSProtocolSet) { 9261 if (RHSProtocolSet.count(proto)) 9262 IntersectionSet.push_back(proto); 9263 } 9264 9265 // Compute the set of protocols that is implied by either the common type or 9266 // the protocols within the intersection. 9267 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9268 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9269 9270 // Remove any implied protocols from the list of inherited protocols. 9271 if (!ImpliedProtocols.empty()) { 9272 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9273 return ImpliedProtocols.contains(proto); 9274 }); 9275 } 9276 9277 // Sort the remaining protocols by name. 9278 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9279 compareObjCProtocolsByName); 9280 } 9281 9282 /// Determine whether the first type is a subtype of the second. 9283 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9284 QualType rhs) { 9285 // Common case: two object pointers. 9286 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9287 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9288 if (lhsOPT && rhsOPT) 9289 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9290 9291 // Two block pointers. 9292 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9293 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9294 if (lhsBlock && rhsBlock) 9295 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9296 9297 // If either is an unqualified 'id' and the other is a block, it's 9298 // acceptable. 9299 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9300 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9301 return true; 9302 9303 return false; 9304 } 9305 9306 // Check that the given Objective-C type argument lists are equivalent. 
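// For context (Foundation example): NSArray declares its element parameter
// __covariant, so an NSArray<NSMutableString *> * is accepted where an
// NSArray<NSString *> * is expected; an invariant parameter would require
// identical type arguments on both sides.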
9307 static bool sameObjCTypeArgs(ASTContext &ctx, 9308 const ObjCInterfaceDecl *iface, 9309 ArrayRef<QualType> lhsArgs, 9310 ArrayRef<QualType> rhsArgs, 9311 bool stripKindOf) { 9312 if (lhsArgs.size() != rhsArgs.size()) 9313 return false; 9314 9315 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9316 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9317 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9318 continue; 9319 9320 switch (typeParams->begin()[i]->getVariance()) { 9321 case ObjCTypeParamVariance::Invariant: 9322 if (!stripKindOf || 9323 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9324 rhsArgs[i].stripObjCKindOfType(ctx))) { 9325 return false; 9326 } 9327 break; 9328 9329 case ObjCTypeParamVariance::Covariant: 9330 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9331 return false; 9332 break; 9333 9334 case ObjCTypeParamVariance::Contravariant: 9335 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9336 return false; 9337 break; 9338 } 9339 } 9340 9341 return true; 9342 } 9343 9344 QualType ASTContext::areCommonBaseCompatible( 9345 const ObjCObjectPointerType *Lptr, 9346 const ObjCObjectPointerType *Rptr) { 9347 const ObjCObjectType *LHS = Lptr->getObjectType(); 9348 const ObjCObjectType *RHS = Rptr->getObjectType(); 9349 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 9350 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 9351 9352 if (!LDecl || !RDecl) 9353 return {}; 9354 9355 // When either LHS or RHS is a kindof type, we should return a kindof type. 9356 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 9357 // kindof(A). 9358 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 9359 9360 // Follow the left-hand side up the class hierarchy until we either hit a 9361 // root or find the RHS. Record the ancestors in case we don't find it. 9362 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 9363 LHSAncestors; 9364 while (true) { 9365 // Record this ancestor. We'll need this if the common type isn't in the 9366 // path from the LHS to the root. 9367 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 9368 9369 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 9370 // Get the type arguments. 9371 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 9372 bool anyChanges = false; 9373 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9374 // Both have type arguments, compare them. 9375 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9376 LHS->getTypeArgs(), RHS->getTypeArgs(), 9377 /*stripKindOf=*/true)) 9378 return {}; 9379 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9380 // If only one has type arguments, the result will not have type 9381 // arguments. 9382 LHSTypeArgs = {}; 9383 anyChanges = true; 9384 } 9385 9386 // Compute the intersection of protocols. 9387 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9388 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 9389 Protocols); 9390 if (!Protocols.empty()) 9391 anyChanges = true; 9392 9393 // If anything in the LHS will have changed, build a new result type. 9394 // If we need to return a kindof type but LHS is not a kindof type, we 9395 // build a new result type. 
9396 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 9397 QualType Result = getObjCInterfaceType(LHS->getInterface()); 9398 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 9399 anyKindOf || LHS->isKindOfType()); 9400 return getObjCObjectPointerType(Result); 9401 } 9402 9403 return getObjCObjectPointerType(QualType(LHS, 0)); 9404 } 9405 9406 // Find the superclass. 9407 QualType LHSSuperType = LHS->getSuperClassType(); 9408 if (LHSSuperType.isNull()) 9409 break; 9410 9411 LHS = LHSSuperType->castAs<ObjCObjectType>(); 9412 } 9413 9414 // We didn't find anything by following the LHS to its root; now check 9415 // the RHS against the cached set of ancestors. 9416 while (true) { 9417 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 9418 if (KnownLHS != LHSAncestors.end()) { 9419 LHS = KnownLHS->second; 9420 9421 // Get the type arguments. 9422 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 9423 bool anyChanges = false; 9424 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9425 // Both have type arguments, compare them. 9426 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9427 LHS->getTypeArgs(), RHS->getTypeArgs(), 9428 /*stripKindOf=*/true)) 9429 return {}; 9430 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9431 // If only one has type arguments, the result will not have type 9432 // arguments. 9433 RHSTypeArgs = {}; 9434 anyChanges = true; 9435 } 9436 9437 // Compute the intersection of protocols. 9438 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9439 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 9440 Protocols); 9441 if (!Protocols.empty()) 9442 anyChanges = true; 9443 9444 // If we need to return a kindof type but RHS is not a kindof type, we 9445 // build a new result type. 9446 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 9447 QualType Result = getObjCInterfaceType(RHS->getInterface()); 9448 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 9449 anyKindOf || RHS->isKindOfType()); 9450 return getObjCObjectPointerType(Result); 9451 } 9452 9453 return getObjCObjectPointerType(QualType(RHS, 0)); 9454 } 9455 9456 // Find the superclass of the RHS. 9457 QualType RHSSuperType = RHS->getSuperClassType(); 9458 if (RHSSuperType.isNull()) 9459 break; 9460 9461 RHS = RHSSuperType->castAs<ObjCObjectType>(); 9462 } 9463 9464 return {}; 9465 } 9466 9467 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 9468 const ObjCObjectType *RHS) { 9469 assert(LHS->getInterface() && "LHS is not an interface type"); 9470 assert(RHS->getInterface() && "RHS is not an interface type"); 9471 9472 // Verify that the base decls are compatible: the RHS must be a subclass of 9473 // the LHS. 9474 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 9475 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 9476 if (!IsSuperClass) 9477 return false; 9478 9479 // If the LHS has protocol qualifiers, determine whether all of them are 9480 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 9481 // LHS). 9482 if (LHS->getNumProtocols() > 0) { 9483 // OK if conversion of LHS to SuperClass results in narrowing of types 9484 // ; i.e., SuperClass may implement at least one of the protocols 9485 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 9486 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 
9487 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
9488 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
9489 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's
9490 // qualifiers.
9491 for (auto *RHSPI : RHS->quals())
9492 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols);
9493 // If there are no protocols associated with RHS, it is not a match.
9494 if (SuperClassInheritedProtocols.empty())
9495 return false;
9496
9497 for (const auto *LHSProto : LHS->quals()) {
9498 bool SuperImplementsProtocol = false;
9499 for (auto *SuperClassProto : SuperClassInheritedProtocols)
9500 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
9501 SuperImplementsProtocol = true;
9502 break;
9503 }
9504 if (!SuperImplementsProtocol)
9505 return false;
9506 }
9507 }
9508
9509 // If the LHS is specialized, we may need to check type arguments.
9510 if (LHS->isSpecialized()) {
9511 // Follow the superclass chain until we've matched the LHS class in the
9512 // hierarchy. This substitutes type arguments through.
9513 const ObjCObjectType *RHSSuper = RHS;
9514 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface))
9515 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>();
9516
9517 // If the RHS is specialized, compare type arguments.
9518 if (RHSSuper->isSpecialized() &&
9519 !sameObjCTypeArgs(*this, LHS->getInterface(),
9520 LHS->getTypeArgs(), RHSSuper->getTypeArgs(),
9521 /*stripKindOf=*/true)) {
9522 return false;
9523 }
9524 }
9525
9526 return true;
9527 }
9528
9529 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
9530 // Get the "pointed to" types.
9531 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
9532 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
9533
9534 if (!LHSOPT || !RHSOPT)
9535 return false;
9536
9537 return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
9538 canAssignObjCInterfaces(RHSOPT, LHSOPT);
9539 }
9540
9541 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
9542 return canAssignObjCInterfaces(
9543 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(),
9544 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>());
9545 }
9546
9547 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
9548 /// both shall have the identically qualified version of a compatible type.
9549 /// C99 6.2.7p1: Two types have compatible types if their types are the
9550 /// same. See 6.7.[2,3,5] for additional rules.
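///
/// For illustration (C only; in C++ this reduces to hasSameType): 'int[10]'
/// and 'int[]' are compatible, as are 'int (*)(int)' and 'int (*)()', while
/// 'int' and 'const int' are treated as compatible only when
/// CompareUnqualified is true.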
9551 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 9552 bool CompareUnqualified) { 9553 if (getLangOpts().CPlusPlus) 9554 return hasSameType(LHS, RHS); 9555 9556 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 9557 } 9558 9559 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 9560 return typesAreCompatible(LHS, RHS); 9561 } 9562 9563 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 9564 return !mergeTypes(LHS, RHS, true).isNull(); 9565 } 9566 9567 /// mergeTransparentUnionType - if T is a transparent union type and a member 9568 /// of T is compatible with SubType, return the merged type, else return 9569 /// QualType() 9570 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 9571 bool OfBlockPointer, 9572 bool Unqualified) { 9573 if (const RecordType *UT = T->getAsUnionType()) { 9574 RecordDecl *UD = UT->getDecl(); 9575 if (UD->hasAttr<TransparentUnionAttr>()) { 9576 for (const auto *I : UD->fields()) { 9577 QualType ET = I->getType().getUnqualifiedType(); 9578 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 9579 if (!MT.isNull()) 9580 return MT; 9581 } 9582 } 9583 } 9584 9585 return {}; 9586 } 9587 9588 /// mergeFunctionParameterTypes - merge two types which appear as function 9589 /// parameter types 9590 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 9591 bool OfBlockPointer, 9592 bool Unqualified) { 9593 // GNU extension: two types are compatible if they appear as a function 9594 // argument, one of the types is a transparent union type and the other 9595 // type is compatible with a union member 9596 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 9597 Unqualified); 9598 if (!lmerge.isNull()) 9599 return lmerge; 9600 9601 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 9602 Unqualified); 9603 if (!rmerge.isNull()) 9604 return rmerge; 9605 9606 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 9607 } 9608 9609 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 9610 bool OfBlockPointer, bool Unqualified, 9611 bool AllowCXX) { 9612 const auto *lbase = lhs->castAs<FunctionType>(); 9613 const auto *rbase = rhs->castAs<FunctionType>(); 9614 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 9615 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 9616 bool allLTypes = true; 9617 bool allRTypes = true; 9618 9619 // Check return type 9620 QualType retType; 9621 if (OfBlockPointer) { 9622 QualType RHS = rbase->getReturnType(); 9623 QualType LHS = lbase->getReturnType(); 9624 bool UnqualifiedResult = Unqualified; 9625 if (!UnqualifiedResult) 9626 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 9627 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 9628 } 9629 else 9630 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 9631 Unqualified); 9632 if (retType.isNull()) 9633 return {}; 9634 9635 if (Unqualified) 9636 retType = retType.getUnqualifiedType(); 9637 9638 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 9639 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 9640 if (Unqualified) { 9641 LRetType = LRetType.getUnqualifiedType(); 9642 RRetType = RRetType.getUnqualifiedType(); 9643 } 9644 9645 if (getCanonicalType(retType) != LRetType) 9646 allLTypes = false; 9647 if (getCanonicalType(retType) != RRetType) 9648 allRTypes = false; 9649 9650 // FIXME: double check this 
9651 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 9652 // rbase->getRegParmAttr() != 0 && 9653 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 9654 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 9655 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 9656 9657 // Compatible functions must have compatible calling conventions 9658 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 9659 return {}; 9660 9661 // Regparm is part of the calling convention. 9662 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 9663 return {}; 9664 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 9665 return {}; 9666 9667 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 9668 return {}; 9669 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 9670 return {}; 9671 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 9672 return {}; 9673 9674 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 9675 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 9676 9677 if (lbaseInfo.getNoReturn() != NoReturn) 9678 allLTypes = false; 9679 if (rbaseInfo.getNoReturn() != NoReturn) 9680 allRTypes = false; 9681 9682 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 9683 9684 if (lproto && rproto) { // two C99 style function prototypes 9685 assert((AllowCXX || 9686 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 9687 "C++ shouldn't be here"); 9688 // Compatible functions must have the same number of parameters 9689 if (lproto->getNumParams() != rproto->getNumParams()) 9690 return {}; 9691 9692 // Variadic and non-variadic functions aren't compatible 9693 if (lproto->isVariadic() != rproto->isVariadic()) 9694 return {}; 9695 9696 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 9697 return {}; 9698 9699 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 9700 bool canUseLeft, canUseRight; 9701 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 9702 newParamInfos)) 9703 return {}; 9704 9705 if (!canUseLeft) 9706 allLTypes = false; 9707 if (!canUseRight) 9708 allRTypes = false; 9709 9710 // Check parameter type compatibility 9711 SmallVector<QualType, 10> types; 9712 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 9713 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 9714 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 9715 QualType paramType = mergeFunctionParameterTypes( 9716 lParamType, rParamType, OfBlockPointer, Unqualified); 9717 if (paramType.isNull()) 9718 return {}; 9719 9720 if (Unqualified) 9721 paramType = paramType.getUnqualifiedType(); 9722 9723 types.push_back(paramType); 9724 if (Unqualified) { 9725 lParamType = lParamType.getUnqualifiedType(); 9726 rParamType = rParamType.getUnqualifiedType(); 9727 } 9728 9729 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 9730 allLTypes = false; 9731 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 9732 allRTypes = false; 9733 } 9734 9735 if (allLTypes) return lhs; 9736 if (allRTypes) return rhs; 9737 9738 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 9739 EPI.ExtInfo = einfo; 9740 EPI.ExtParameterInfos = 9741 newParamInfos.empty() ? nullptr : newParamInfos.data(); 9742 return getFunctionType(retType, types, EPI); 9743 } 9744 9745 if (lproto) allRTypes = false; 9746 if (rproto) allLTypes = false; 9747 9748 const FunctionProtoType *proto = lproto ? 
lproto : rproto; 9749 if (proto) { 9750 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 9751 if (proto->isVariadic()) 9752 return {}; 9753 // Check that the types are compatible with the types that 9754 // would result from default argument promotions (C99 6.7.5.3p15). 9755 // The only types actually affected are promotable integer 9756 // types and floats, which would be passed as a different 9757 // type depending on whether the prototype is visible. 9758 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 9759 QualType paramTy = proto->getParamType(i); 9760 9761 // Look at the converted type of enum types, since that is the type used 9762 // to pass enum values. 9763 if (const auto *Enum = paramTy->getAs<EnumType>()) { 9764 paramTy = Enum->getDecl()->getIntegerType(); 9765 if (paramTy.isNull()) 9766 return {}; 9767 } 9768 9769 if (paramTy->isPromotableIntegerType() || 9770 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 9771 return {}; 9772 } 9773 9774 if (allLTypes) return lhs; 9775 if (allRTypes) return rhs; 9776 9777 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 9778 EPI.ExtInfo = einfo; 9779 return getFunctionType(retType, proto->getParamTypes(), EPI); 9780 } 9781 9782 if (allLTypes) return lhs; 9783 if (allRTypes) return rhs; 9784 return getFunctionNoProtoType(retType, einfo); 9785 } 9786 9787 /// Given that we have an enum type and a non-enum type, try to merge them. 9788 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 9789 QualType other, bool isBlockReturnType) { 9790 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 9791 // a signed integer type, or an unsigned integer type. 9792 // Compatibility is based on the underlying type, not the promotion 9793 // type. 9794 QualType underlyingType = ET->getDecl()->getIntegerType(); 9795 if (underlyingType.isNull()) 9796 return {}; 9797 if (Context.hasSameType(underlyingType, other)) 9798 return other; 9799 9800 // In block return types, we're more permissive and accept any 9801 // integral type of the same size. 9802 if (isBlockReturnType && other->isIntegerType() && 9803 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 9804 return other; 9805 9806 return {}; 9807 } 9808 9809 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, 9810 bool OfBlockPointer, 9811 bool Unqualified, bool BlockReturnType) { 9812 // For C++ we will not reach this code with reference types (see below), 9813 // for OpenMP variant call overloading we might. 9814 // 9815 // C++ [expr]: If an expression initially has the type "reference to T", the 9816 // type is adjusted to "T" prior to any further analysis, the expression 9817 // designates the object or function denoted by the reference, and the 9818 // expression is an lvalue unless the reference is an rvalue reference and 9819 // the expression is a function call (possibly inside parentheses). 
9820 if (LangOpts.OpenMP && LHS->getAs<ReferenceType>() && 9821 RHS->getAs<ReferenceType>() && LHS->getTypeClass() == RHS->getTypeClass()) 9822 return mergeTypes(LHS->getAs<ReferenceType>()->getPointeeType(), 9823 RHS->getAs<ReferenceType>()->getPointeeType(), 9824 OfBlockPointer, Unqualified, BlockReturnType); 9825 if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>()) 9826 return {}; 9827 9828 if (Unqualified) { 9829 LHS = LHS.getUnqualifiedType(); 9830 RHS = RHS.getUnqualifiedType(); 9831 } 9832 9833 QualType LHSCan = getCanonicalType(LHS), 9834 RHSCan = getCanonicalType(RHS); 9835 9836 // If two types are identical, they are compatible. 9837 if (LHSCan == RHSCan) 9838 return LHS; 9839 9840 // If the qualifiers are different, the types aren't compatible... mostly. 9841 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 9842 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 9843 if (LQuals != RQuals) { 9844 // If any of these qualifiers are different, we have a type 9845 // mismatch. 9846 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 9847 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 9848 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 9849 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 9850 return {}; 9851 9852 // Exactly one GC qualifier difference is allowed: __strong is 9853 // okay if the other type has no GC qualifier but is an Objective 9854 // C object pointer (i.e. implicitly strong by default). We fix 9855 // this by pretending that the unqualified type was actually 9856 // qualified __strong. 9857 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 9858 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 9859 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 9860 9861 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 9862 return {}; 9863 9864 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 9865 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 9866 } 9867 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 9868 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 9869 } 9870 return {}; 9871 } 9872 9873 // Okay, qualifiers are equal. 9874 9875 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 9876 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 9877 9878 // We want to consider the two function types to be the same for these 9879 // comparisons, just force one to the other. 9880 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 9881 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 9882 9883 // Same as above for arrays 9884 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 9885 LHSClass = Type::ConstantArray; 9886 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 9887 RHSClass = Type::ConstantArray; 9888 9889 // ObjCInterfaces are just specialized ObjCObjects. 9890 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 9891 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 9892 9893 // Canonicalize ExtVector -> Vector. 9894 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 9895 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 9896 9897 // If the canonical type classes don't match. 9898 if (LHSClass != RHSClass) { 9899 // Note that we only have special rules for turning block enum 9900 // returns into block int returns, not vice-versa. 
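    // For illustration: an enum whose selected underlying type is
    // 'unsigned int' merges with 'unsigned int' here, but not with 'int';
    // only a block return position additionally accepts any integer type of
    // the same width (see mergeEnumWithInteger above).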
9901 if (const auto *ETy = LHS->getAs<EnumType>()) { 9902 return mergeEnumWithInteger(*this, ETy, RHS, false); 9903 } 9904 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 9905 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 9906 } 9907 // allow block pointer type to match an 'id' type. 9908 if (OfBlockPointer && !BlockReturnType) { 9909 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 9910 return LHS; 9911 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 9912 return RHS; 9913 } 9914 9915 return {}; 9916 } 9917 9918 // The canonical type classes match. 9919 switch (LHSClass) { 9920 #define TYPE(Class, Base) 9921 #define ABSTRACT_TYPE(Class, Base) 9922 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 9923 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 9924 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 9925 #include "clang/AST/TypeNodes.inc" 9926 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 9927 9928 case Type::Auto: 9929 case Type::DeducedTemplateSpecialization: 9930 case Type::LValueReference: 9931 case Type::RValueReference: 9932 case Type::MemberPointer: 9933 llvm_unreachable("C++ should never be in mergeTypes"); 9934 9935 case Type::ObjCInterface: 9936 case Type::IncompleteArray: 9937 case Type::VariableArray: 9938 case Type::FunctionProto: 9939 case Type::ExtVector: 9940 llvm_unreachable("Types are eliminated above"); 9941 9942 case Type::Pointer: 9943 { 9944 // Merge two pointer types, while trying to preserve typedef info 9945 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 9946 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 9947 if (Unqualified) { 9948 LHSPointee = LHSPointee.getUnqualifiedType(); 9949 RHSPointee = RHSPointee.getUnqualifiedType(); 9950 } 9951 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 9952 Unqualified); 9953 if (ResultType.isNull()) 9954 return {}; 9955 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 9956 return LHS; 9957 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 9958 return RHS; 9959 return getPointerType(ResultType); 9960 } 9961 case Type::BlockPointer: 9962 { 9963 // Merge two block pointer types, while trying to preserve typedef info 9964 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 9965 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 9966 if (Unqualified) { 9967 LHSPointee = LHSPointee.getUnqualifiedType(); 9968 RHSPointee = RHSPointee.getUnqualifiedType(); 9969 } 9970 if (getLangOpts().OpenCL) { 9971 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 9972 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 9973 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 9974 // 6.12.5) thus the following check is asymmetric. 
9975 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 9976 return {}; 9977 LHSPteeQual.removeAddressSpace(); 9978 RHSPteeQual.removeAddressSpace(); 9979 LHSPointee = 9980 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 9981 RHSPointee = 9982 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 9983 } 9984 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 9985 Unqualified); 9986 if (ResultType.isNull()) 9987 return {}; 9988 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 9989 return LHS; 9990 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 9991 return RHS; 9992 return getBlockPointerType(ResultType); 9993 } 9994 case Type::Atomic: 9995 { 9996 // Merge two pointer types, while trying to preserve typedef info 9997 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 9998 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 9999 if (Unqualified) { 10000 LHSValue = LHSValue.getUnqualifiedType(); 10001 RHSValue = RHSValue.getUnqualifiedType(); 10002 } 10003 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 10004 Unqualified); 10005 if (ResultType.isNull()) 10006 return {}; 10007 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 10008 return LHS; 10009 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 10010 return RHS; 10011 return getAtomicType(ResultType); 10012 } 10013 case Type::ConstantArray: 10014 { 10015 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 10016 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 10017 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 10018 return {}; 10019 10020 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 10021 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 10022 if (Unqualified) { 10023 LHSElem = LHSElem.getUnqualifiedType(); 10024 RHSElem = RHSElem.getUnqualifiedType(); 10025 } 10026 10027 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 10028 if (ResultType.isNull()) 10029 return {}; 10030 10031 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 10032 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 10033 10034 // If either side is a variable array, and both are complete, check whether 10035 // the current dimension is definite. 
10036 if (LVAT || RVAT) { 10037 auto SizeFetch = [this](const VariableArrayType* VAT, 10038 const ConstantArrayType* CAT) 10039 -> std::pair<bool,llvm::APInt> { 10040 if (VAT) { 10041 Optional<llvm::APSInt> TheInt; 10042 Expr *E = VAT->getSizeExpr(); 10043 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10044 return std::make_pair(true, *TheInt); 10045 return std::make_pair(false, llvm::APSInt()); 10046 } 10047 if (CAT) 10048 return std::make_pair(true, CAT->getSize()); 10049 return std::make_pair(false, llvm::APInt()); 10050 }; 10051 10052 bool HaveLSize, HaveRSize; 10053 llvm::APInt LSize, RSize; 10054 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10055 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10056 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10057 return {}; // Definite, but unequal, array dimension 10058 } 10059 10060 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10061 return LHS; 10062 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10063 return RHS; 10064 if (LCAT) 10065 return getConstantArrayType(ResultType, LCAT->getSize(), 10066 LCAT->getSizeExpr(), 10067 ArrayType::ArraySizeModifier(), 0); 10068 if (RCAT) 10069 return getConstantArrayType(ResultType, RCAT->getSize(), 10070 RCAT->getSizeExpr(), 10071 ArrayType::ArraySizeModifier(), 0); 10072 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10073 return LHS; 10074 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10075 return RHS; 10076 if (LVAT) { 10077 // FIXME: This isn't correct! But tricky to implement because 10078 // the array's size has to be the size of LHS, but the type 10079 // has to be different. 10080 return LHS; 10081 } 10082 if (RVAT) { 10083 // FIXME: This isn't correct! But tricky to implement because 10084 // the array's size has to be the size of RHS, but the type 10085 // has to be different. 10086 return RHS; 10087 } 10088 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10089 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10090 return getIncompleteArrayType(ResultType, 10091 ArrayType::ArraySizeModifier(), 0); 10092 } 10093 case Type::FunctionNoProto: 10094 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); 10095 case Type::Record: 10096 case Type::Enum: 10097 return {}; 10098 case Type::Builtin: 10099 // Only exactly equal builtin types are compatible, which is tested above. 10100 return {}; 10101 case Type::Complex: 10102 // Distinct complex types are incompatible. 10103 return {}; 10104 case Type::Vector: 10105 // FIXME: The merged type should be an ExtVector! 10106 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10107 RHSCan->castAs<VectorType>())) 10108 return LHS; 10109 return {}; 10110 case Type::ConstantMatrix: 10111 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10112 RHSCan->castAs<ConstantMatrixType>())) 10113 return LHS; 10114 return {}; 10115 case Type::ObjCObject: { 10116 // Check if the types are assignment compatible. 10117 // FIXME: This should be type compatibility, e.g. whether 10118 // "LHS x; RHS x;" at global scope is legal. 
10119 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10120 RHS->castAs<ObjCObjectType>())) 10121 return LHS; 10122 return {}; 10123 } 10124 case Type::ObjCObjectPointer: 10125 if (OfBlockPointer) { 10126 if (canAssignObjCInterfacesInBlockPointer( 10127 LHS->castAs<ObjCObjectPointerType>(), 10128 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10129 return LHS; 10130 return {}; 10131 } 10132 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10133 RHS->castAs<ObjCObjectPointerType>())) 10134 return LHS; 10135 return {}; 10136 case Type::Pipe: 10137 assert(LHS != RHS && 10138 "Equivalent pipe types should have already been handled!"); 10139 return {}; 10140 case Type::BitInt: { 10141 // Merge two bit-precise int types, while trying to preserve typedef info. 10142 bool LHSUnsigned = LHS->castAs<BitIntType>()->isUnsigned(); 10143 bool RHSUnsigned = RHS->castAs<BitIntType>()->isUnsigned(); 10144 unsigned LHSBits = LHS->castAs<BitIntType>()->getNumBits(); 10145 unsigned RHSBits = RHS->castAs<BitIntType>()->getNumBits(); 10146 10147 // Like unsigned/int, shouldn't have a type if they don't match. 10148 if (LHSUnsigned != RHSUnsigned) 10149 return {}; 10150 10151 if (LHSBits != RHSBits) 10152 return {}; 10153 return LHS; 10154 } 10155 } 10156 10157 llvm_unreachable("Invalid Type::Class!"); 10158 } 10159 10160 bool ASTContext::mergeExtParameterInfo( 10161 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10162 bool &CanUseFirst, bool &CanUseSecond, 10163 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10164 assert(NewParamInfos.empty() && "param info list not empty"); 10165 CanUseFirst = CanUseSecond = true; 10166 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10167 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10168 10169 // Fast path: if the first type doesn't have ext parameter infos, 10170 // we match if and only if the second type also doesn't have them. 10171 if (!FirstHasInfo && !SecondHasInfo) 10172 return true; 10173 10174 bool NeedParamInfo = false; 10175 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10176 : SecondFnType->getExtParameterInfos().size(); 10177 10178 for (size_t I = 0; I < E; ++I) { 10179 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10180 if (FirstHasInfo) 10181 FirstParam = FirstFnType->getExtParameterInfo(I); 10182 if (SecondHasInfo) 10183 SecondParam = SecondFnType->getExtParameterInfo(I); 10184 10185 // Cannot merge unless everything except the noescape flag matches. 10186 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10187 return false; 10188 10189 bool FirstNoEscape = FirstParam.isNoEscape(); 10190 bool SecondNoEscape = SecondParam.isNoEscape(); 10191 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10192 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10193 if (NewParamInfos.back().getOpaqueValue()) 10194 NeedParamInfo = true; 10195 if (FirstNoEscape != IsNoEscape) 10196 CanUseFirst = false; 10197 if (SecondNoEscape != IsNoEscape) 10198 CanUseSecond = false; 10199 } 10200 10201 if (!NeedParamInfo) 10202 NewParamInfos.clear(); 10203 10204 return true; 10205 } 10206 10207 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10208 ObjCLayouts[CD] = nullptr; 10209 } 10210 10211 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10212 /// 'RHS' attributes and returns the merged version; including for function 10213 /// return types. 
10214 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10215 QualType LHSCan = getCanonicalType(LHS), 10216 RHSCan = getCanonicalType(RHS); 10217 // If two types are identical, they are compatible. 10218 if (LHSCan == RHSCan) 10219 return LHS; 10220 if (RHSCan->isFunctionType()) { 10221 if (!LHSCan->isFunctionType()) 10222 return {}; 10223 QualType OldReturnType = 10224 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10225 QualType NewReturnType = 10226 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10227 QualType ResReturnType = 10228 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10229 if (ResReturnType.isNull()) 10230 return {}; 10231 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10232 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10233 // In either case, use OldReturnType to build the new function type. 10234 const auto *F = LHS->castAs<FunctionType>(); 10235 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10236 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10237 EPI.ExtInfo = getFunctionExtInfo(LHS); 10238 QualType ResultType = 10239 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10240 return ResultType; 10241 } 10242 } 10243 return {}; 10244 } 10245 10246 // If the qualifiers are different, the types can still be merged. 10247 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10248 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10249 if (LQuals != RQuals) { 10250 // If any of these qualifiers are different, we have a type mismatch. 10251 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10252 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10253 return {}; 10254 10255 // Exactly one GC qualifier difference is allowed: __strong is 10256 // okay if the other type has no GC qualifier but is an Objective 10257 // C object pointer (i.e. implicitly strong by default). We fix 10258 // this by pretending that the unqualified type was actually 10259 // qualified __strong. 
10260 Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
10261 Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
10262 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
10263
10264 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
10265 return {};
10266
10267 if (GC_L == Qualifiers::Strong)
10268 return LHS;
10269 if (GC_R == Qualifiers::Strong)
10270 return RHS;
10271 return {};
10272 }
10273
10274 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
10275 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType();
10276 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType();
10277 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
10278 if (ResQT == LHSBaseQT)
10279 return LHS;
10280 if (ResQT == RHSBaseQT)
10281 return RHS;
10282 }
10283 return {};
10284 }
10285
10286 //===----------------------------------------------------------------------===//
10287 // Integer Predicates
10288 //===----------------------------------------------------------------------===//
10289
10290 unsigned ASTContext::getIntWidth(QualType T) const {
10291 if (const auto *ET = T->getAs<EnumType>())
10292 T = ET->getDecl()->getIntegerType();
10293 if (T->isBooleanType())
10294 return 1;
10295 if (const auto *EIT = T->getAs<BitIntType>())
10296 return EIT->getNumBits();
10297 // For builtin types, just use the standard type sizing method.
10298 return (unsigned)getTypeSize(T);
10299 }
10300
10301 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
10302 assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) &&
10303 "Unexpected type");
10304
10305 // Turn <4 x signed int> -> <4 x unsigned int>
10306 if (const auto *VTy = T->getAs<VectorType>())
10307 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
10308 VTy->getNumElements(), VTy->getVectorKind());
10309
10310 // For _BitInt, return an unsigned _BitInt with the same width.
10311 if (const auto *EITy = T->getAs<BitIntType>())
10312 return getBitIntType(/*IsUnsigned=*/true, EITy->getNumBits());
10313
10314 // For enums, get the underlying integer type of the enum, and let the general
10315 // integer type sign-changing code handle it.
10316 if (const auto *ETy = T->getAs<EnumType>())
10317 T = ETy->getDecl()->getIntegerType();
10318
10319 switch (T->castAs<BuiltinType>()->getKind()) {
10320 case BuiltinType::Char_S:
10321 case BuiltinType::SChar:
10322 return UnsignedCharTy;
10323 case BuiltinType::Short:
10324 return UnsignedShortTy;
10325 case BuiltinType::Int:
10326 return UnsignedIntTy;
10327 case BuiltinType::Long:
10328 return UnsignedLongTy;
10329 case BuiltinType::LongLong:
10330 return UnsignedLongLongTy;
10331 case BuiltinType::Int128:
10332 return UnsignedInt128Ty;
10333 // wchar_t is special. It is either signed or not, but when it's signed,
10334 // there's no matching "unsigned wchar_t". Therefore we return the unsigned
10335 // version of its underlying type instead.
10336 case BuiltinType::WChar_S:
10337 return getUnsignedWCharType();
10338
10339 case BuiltinType::ShortAccum:
10340 return UnsignedShortAccumTy;
10341 case BuiltinType::Accum:
10342 return UnsignedAccumTy;
10343 case BuiltinType::LongAccum:
10344 return UnsignedLongAccumTy;
10345 case BuiltinType::SatShortAccum:
10346 return SatUnsignedShortAccumTy;
10347 case BuiltinType::SatAccum:
10348 return SatUnsignedAccumTy;
10349 case BuiltinType::SatLongAccum:
10350 return SatUnsignedLongAccumTy;
10351 case BuiltinType::ShortFract:
10352 return UnsignedShortFractTy;
10353 case BuiltinType::Fract:
10354 return UnsignedFractTy;
10355 case BuiltinType::LongFract:
10356 return UnsignedLongFractTy;
10357 case BuiltinType::SatShortFract:
10358 return SatUnsignedShortFractTy;
10359 case BuiltinType::SatFract:
10360 return SatUnsignedFractTy;
10361 case BuiltinType::SatLongFract:
10362 return SatUnsignedLongFractTy;
10363 default:
10364 llvm_unreachable("Unexpected signed integer or fixed point type");
10365 }
10366 }
10367
10368 QualType ASTContext::getCorrespondingSignedType(QualType T) const {
10369 assert((T->hasUnsignedIntegerRepresentation() ||
10370 T->isUnsignedFixedPointType()) &&
10371 "Unexpected type");
10372
10373 // Turn <4 x unsigned int> -> <4 x signed int>
10374 if (const auto *VTy = T->getAs<VectorType>())
10375 return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
10376 VTy->getNumElements(), VTy->getVectorKind());
10377
10378 // For _BitInt, return a signed _BitInt with the same width.
10379 if (const auto *EITy = T->getAs<BitIntType>())
10380 return getBitIntType(/*IsUnsigned=*/false, EITy->getNumBits());
10381
10382 // For enums, get the underlying integer type of the enum, and let the general
10383 // integer type sign-changing code handle it.
10384 if (const auto *ETy = T->getAs<EnumType>())
10385 T = ETy->getDecl()->getIntegerType();
10386
10387 switch (T->castAs<BuiltinType>()->getKind()) {
10388 case BuiltinType::Char_U:
10389 case BuiltinType::UChar:
10390 return SignedCharTy;
10391 case BuiltinType::UShort:
10392 return ShortTy;
10393 case BuiltinType::UInt:
10394 return IntTy;
10395 case BuiltinType::ULong:
10396 return LongTy;
10397 case BuiltinType::ULongLong:
10398 return LongLongTy;
10399 case BuiltinType::UInt128:
10400 return Int128Ty;
10401 // wchar_t is special. It is either unsigned or not, but when it's unsigned,
10402 // there's no matching "signed wchar_t". Therefore we return the signed
10403 // version of its underlying type instead.
10404 case BuiltinType::WChar_U: 10405 return getSignedWCharType(); 10406 10407 case BuiltinType::UShortAccum: 10408 return ShortAccumTy; 10409 case BuiltinType::UAccum: 10410 return AccumTy; 10411 case BuiltinType::ULongAccum: 10412 return LongAccumTy; 10413 case BuiltinType::SatUShortAccum: 10414 return SatShortAccumTy; 10415 case BuiltinType::SatUAccum: 10416 return SatAccumTy; 10417 case BuiltinType::SatULongAccum: 10418 return SatLongAccumTy; 10419 case BuiltinType::UShortFract: 10420 return ShortFractTy; 10421 case BuiltinType::UFract: 10422 return FractTy; 10423 case BuiltinType::ULongFract: 10424 return LongFractTy; 10425 case BuiltinType::SatUShortFract: 10426 return SatShortFractTy; 10427 case BuiltinType::SatUFract: 10428 return SatFractTy; 10429 case BuiltinType::SatULongFract: 10430 return SatLongFractTy; 10431 default: 10432 llvm_unreachable("Unexpected unsigned integer or fixed point type"); 10433 } 10434 } 10435 10436 ASTMutationListener::~ASTMutationListener() = default; 10437 10438 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 10439 QualType ReturnType) {} 10440 10441 //===----------------------------------------------------------------------===// 10442 // Builtin Type Computation 10443 //===----------------------------------------------------------------------===// 10444 10445 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 10446 /// pointer over the consumed characters. This returns the resultant type. If 10447 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 10448 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 10449 /// a vector of "i*". 10450 /// 10451 /// RequiresICE is filled in on return to indicate whether the value is required 10452 /// to be an Integer Constant Expression. 10453 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 10454 ASTContext::GetBuiltinTypeError &Error, 10455 bool &RequiresICE, 10456 bool AllowTypeModifiers) { 10457 // Modifiers. 10458 int HowLong = 0; 10459 bool Signed = false, Unsigned = false; 10460 RequiresICE = false; 10461 10462 // Read the prefixed modifiers first. 10463 bool Done = false; 10464 #ifndef NDEBUG 10465 bool IsSpecial = false; 10466 #endif 10467 while (!Done) { 10468 switch (*Str++) { 10469 default: Done = true; --Str; break; 10470 case 'I': 10471 RequiresICE = true; 10472 break; 10473 case 'S': 10474 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 10475 assert(!Signed && "Can't use 'S' modifier multiple times!"); 10476 Signed = true; 10477 break; 10478 case 'U': 10479 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 10480 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 10481 Unsigned = true; 10482 break; 10483 case 'L': 10484 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 10485 assert(HowLong <= 2 && "Can't have LLLL modifier"); 10486 ++HowLong; 10487 break; 10488 case 'N': 10489 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 10490 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10491 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 10492 #ifndef NDEBUG 10493 IsSpecial = true; 10494 #endif 10495 if (Context.getTargetInfo().getLongWidth() == 32) 10496 ++HowLong; 10497 break; 10498 case 'W': 10499 // This modifier represents int64 type. 
10500 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10501 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 10502 #ifndef NDEBUG 10503 IsSpecial = true; 10504 #endif 10505 switch (Context.getTargetInfo().getInt64Type()) { 10506 default: 10507 llvm_unreachable("Unexpected integer type"); 10508 case TargetInfo::SignedLong: 10509 HowLong = 1; 10510 break; 10511 case TargetInfo::SignedLongLong: 10512 HowLong = 2; 10513 break; 10514 } 10515 break; 10516 case 'Z': 10517 // This modifier represents int32 type. 10518 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10519 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 10520 #ifndef NDEBUG 10521 IsSpecial = true; 10522 #endif 10523 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 10524 default: 10525 llvm_unreachable("Unexpected integer type"); 10526 case TargetInfo::SignedInt: 10527 HowLong = 0; 10528 break; 10529 case TargetInfo::SignedLong: 10530 HowLong = 1; 10531 break; 10532 case TargetInfo::SignedLongLong: 10533 HowLong = 2; 10534 break; 10535 } 10536 break; 10537 case 'O': 10538 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10539 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 10540 #ifndef NDEBUG 10541 IsSpecial = true; 10542 #endif 10543 if (Context.getLangOpts().OpenCL) 10544 HowLong = 1; 10545 else 10546 HowLong = 2; 10547 break; 10548 } 10549 } 10550 10551 QualType Type; 10552 10553 // Read the base type. 10554 switch (*Str++) { 10555 default: llvm_unreachable("Unknown builtin type letter!"); 10556 case 'x': 10557 assert(HowLong == 0 && !Signed && !Unsigned && 10558 "Bad modifiers used with 'x'!"); 10559 Type = Context.Float16Ty; 10560 break; 10561 case 'y': 10562 assert(HowLong == 0 && !Signed && !Unsigned && 10563 "Bad modifiers used with 'y'!"); 10564 Type = Context.BFloat16Ty; 10565 break; 10566 case 'v': 10567 assert(HowLong == 0 && !Signed && !Unsigned && 10568 "Bad modifiers used with 'v'!"); 10569 Type = Context.VoidTy; 10570 break; 10571 case 'h': 10572 assert(HowLong == 0 && !Signed && !Unsigned && 10573 "Bad modifiers used with 'h'!"); 10574 Type = Context.HalfTy; 10575 break; 10576 case 'f': 10577 assert(HowLong == 0 && !Signed && !Unsigned && 10578 "Bad modifiers used with 'f'!"); 10579 Type = Context.FloatTy; 10580 break; 10581 case 'd': 10582 assert(HowLong < 3 && !Signed && !Unsigned && 10583 "Bad modifiers used with 'd'!"); 10584 if (HowLong == 1) 10585 Type = Context.LongDoubleTy; 10586 else if (HowLong == 2) 10587 Type = Context.Float128Ty; 10588 else 10589 Type = Context.DoubleTy; 10590 break; 10591 case 's': 10592 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 10593 if (Unsigned) 10594 Type = Context.UnsignedShortTy; 10595 else 10596 Type = Context.ShortTy; 10597 break; 10598 case 'i': 10599 if (HowLong == 3) 10600 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 10601 else if (HowLong == 2) 10602 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 10603 else if (HowLong == 1) 10604 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 10605 else 10606 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 10607 break; 10608 case 'c': 10609 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 10610 if (Signed) 10611 Type = Context.SignedCharTy; 10612 else if (Unsigned) 10613 Type = Context.UnsignedCharTy; 10614 else 10615 Type = Context.CharTy; 10616 break; 10617 case 'b': // boolean 10618 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 10619 Type = Context.BoolTy; 10620 break; 10621 case 'z': // size_t. 10622 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 10623 Type = Context.getSizeType(); 10624 break; 10625 case 'w': // wchar_t. 10626 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 10627 Type = Context.getWideCharType(); 10628 break; 10629 case 'F': 10630 Type = Context.getCFConstantStringType(); 10631 break; 10632 case 'G': 10633 Type = Context.getObjCIdType(); 10634 break; 10635 case 'H': 10636 Type = Context.getObjCSelType(); 10637 break; 10638 case 'M': 10639 Type = Context.getObjCSuperType(); 10640 break; 10641 case 'a': 10642 Type = Context.getBuiltinVaListType(); 10643 assert(!Type.isNull() && "builtin va list type not initialized!"); 10644 break; 10645 case 'A': 10646 // This is a "reference" to a va_list; however, what exactly 10647 // this means depends on how va_list is defined. There are two 10648 // different kinds of va_list: ones passed by value, and ones 10649 // passed by reference. An example of a by-value va_list is 10650 // x86, where va_list is a char*. An example of by-ref va_list 10651 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 10652 // we want this argument to be a char*&; for x86-64, we want 10653 // it to be a __va_list_tag*. 10654 Type = Context.getBuiltinVaListType(); 10655 assert(!Type.isNull() && "builtin va list type not initialized!"); 10656 if (Type->isArrayType()) 10657 Type = Context.getArrayDecayedType(Type); 10658 else 10659 Type = Context.getLValueReferenceType(Type); 10660 break; 10661 case 'q': { 10662 char *End; 10663 unsigned NumElements = strtoul(Str, &End, 10); 10664 assert(End != Str && "Missing vector size"); 10665 Str = End; 10666 10667 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 10668 RequiresICE, false); 10669 assert(!RequiresICE && "Can't require vector ICE"); 10670 10671 Type = Context.getScalableVectorType(ElementType, NumElements); 10672 break; 10673 } 10674 case 'V': { 10675 char *End; 10676 unsigned NumElements = strtoul(Str, &End, 10); 10677 assert(End != Str && "Missing vector size"); 10678 Str = End; 10679 10680 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 10681 RequiresICE, false); 10682 assert(!RequiresICE && "Can't require vector ICE"); 10683 10684 // TODO: No way to make AltiVec vectors in builtins yet. 
10685 Type = Context.getVectorType(ElementType, NumElements, 10686 VectorType::GenericVector); 10687 break; 10688 } 10689 case 'E': { 10690 char *End; 10691 10692 unsigned NumElements = strtoul(Str, &End, 10); 10693 assert(End != Str && "Missing vector size"); 10694 10695 Str = End; 10696 10697 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 10698 false); 10699 Type = Context.getExtVectorType(ElementType, NumElements); 10700 break; 10701 } 10702 case 'X': { 10703 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 10704 false); 10705 assert(!RequiresICE && "Can't require complex ICE"); 10706 Type = Context.getComplexType(ElementType); 10707 break; 10708 } 10709 case 'Y': 10710 Type = Context.getPointerDiffType(); 10711 break; 10712 case 'P': 10713 Type = Context.getFILEType(); 10714 if (Type.isNull()) { 10715 Error = ASTContext::GE_Missing_stdio; 10716 return {}; 10717 } 10718 break; 10719 case 'J': 10720 if (Signed) 10721 Type = Context.getsigjmp_bufType(); 10722 else 10723 Type = Context.getjmp_bufType(); 10724 10725 if (Type.isNull()) { 10726 Error = ASTContext::GE_Missing_setjmp; 10727 return {}; 10728 } 10729 break; 10730 case 'K': 10731 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 10732 Type = Context.getucontext_tType(); 10733 10734 if (Type.isNull()) { 10735 Error = ASTContext::GE_Missing_ucontext; 10736 return {}; 10737 } 10738 break; 10739 case 'p': 10740 Type = Context.getProcessIDType(); 10741 break; 10742 } 10743 10744 // If there are modifiers and if we're allowed to parse them, go for it. 10745 Done = !AllowTypeModifiers; 10746 while (!Done) { 10747 switch (char c = *Str++) { 10748 default: Done = true; --Str; break; 10749 case '*': 10750 case '&': { 10751 // Both pointers and references can have their pointee types 10752 // qualified with an address space. 10753 char *End; 10754 unsigned AddrSpace = strtoul(Str, &End, 10); 10755 if (End != Str) { 10756 // Note AddrSpace == 0 is not the same as an unspecified address space. 10757 Type = Context.getAddrSpaceQualType( 10758 Type, 10759 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 10760 Str = End; 10761 } 10762 if (c == '*') 10763 Type = Context.getPointerType(Type); 10764 else 10765 Type = Context.getLValueReferenceType(Type); 10766 break; 10767 } 10768 // FIXME: There's no way to have a built-in with an rvalue ref arg. 10769 case 'C': 10770 Type = Type.withConst(); 10771 break; 10772 case 'D': 10773 Type = Context.getVolatileType(Type); 10774 break; 10775 case 'R': 10776 Type = Type.withRestrict(); 10777 break; 10778 } 10779 } 10780 10781 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 10782 "Integer constant 'I' type must be an integer"); 10783 10784 return Type; 10785 } 10786 10787 // On some targets such as PowerPC, some of the builtins are defined with custom 10788 // type descriptors for target-dependent types. These descriptors are decoded in 10789 // other functions, but it may be useful to be able to fall back to default 10790 // descriptor decoding to define builtins mixing target-dependent and target- 10791 // independent types. This function allows decoding one type descriptor with 10792 // default decoding. 
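// For illustration, default decoding turns the descriptor "icC*." into a
// variadic function returning 'int' whose only fixed parameter is
// 'const char *': 'i' is int, 'c' is char, the 'C' suffix adds const, '*'
// forms the pointer, and a trailing '.' marks the signature as variadic
// (the '.' itself is handled by GetBuiltinType below).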
10793 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 10794 GetBuiltinTypeError &Error, bool &RequireICE, 10795 bool AllowTypeModifiers) const { 10796 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 10797 } 10798 10799 /// GetBuiltinType - Return the type for the specified builtin. 10800 QualType ASTContext::GetBuiltinType(unsigned Id, 10801 GetBuiltinTypeError &Error, 10802 unsigned *IntegerConstantArgs) const { 10803 const char *TypeStr = BuiltinInfo.getTypeString(Id); 10804 if (TypeStr[0] == '\0') { 10805 Error = GE_Missing_type; 10806 return {}; 10807 } 10808 10809 SmallVector<QualType, 8> ArgTypes; 10810 10811 bool RequiresICE = false; 10812 Error = GE_None; 10813 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 10814 RequiresICE, true); 10815 if (Error != GE_None) 10816 return {}; 10817 10818 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 10819 10820 while (TypeStr[0] && TypeStr[0] != '.') { 10821 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 10822 if (Error != GE_None) 10823 return {}; 10824 10825 // If this argument is required to be an IntegerConstantExpression and the 10826 // caller cares, fill in the bitmask we return. 10827 if (RequiresICE && IntegerConstantArgs) 10828 *IntegerConstantArgs |= 1 << ArgTypes.size(); 10829 10830 // Do array -> pointer decay. The builtin should use the decayed type. 10831 if (Ty->isArrayType()) 10832 Ty = getArrayDecayedType(Ty); 10833 10834 ArgTypes.push_back(Ty); 10835 } 10836 10837 if (Id == Builtin::BI__GetExceptionInfo) 10838 return {}; 10839 10840 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 10841 "'.' should only occur at end of builtin type list!"); 10842 10843 bool Variadic = (TypeStr[0] == '.'); 10844 10845 FunctionType::ExtInfo EI(getDefaultCallingConvention( 10846 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 10847 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 10848 10849 10850 // We really shouldn't be making a no-proto type here. 10851 if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus) 10852 return getFunctionNoProtoType(ResType, EI); 10853 10854 FunctionProtoType::ExtProtoInfo EPI; 10855 EPI.ExtInfo = EI; 10856 EPI.Variadic = Variadic; 10857 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 10858 EPI.ExceptionSpec.Type = 10859 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 10860 10861 return getFunctionType(ResType, ArgTypes, EPI); 10862 } 10863 10864 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 10865 const FunctionDecl *FD) { 10866 if (!FD->isExternallyVisible()) 10867 return GVA_Internal; 10868 10869 // Non-user-provided functions get emitted as weak definitions with every 10870 // use, no matter whether they've been explicitly instantiated etc. 
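  // For illustration: an implicitly-defined copy constructor or assignment
  // operator is treated as GVA_DiscardableODR, so each translation unit that
  // uses it emits its own discardable copy.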
10871 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) 10872 if (!MD->isUserProvided()) 10873 return GVA_DiscardableODR; 10874 10875 GVALinkage External; 10876 switch (FD->getTemplateSpecializationKind()) { 10877 case TSK_Undeclared: 10878 case TSK_ExplicitSpecialization: 10879 External = GVA_StrongExternal; 10880 break; 10881 10882 case TSK_ExplicitInstantiationDefinition: 10883 return GVA_StrongODR; 10884 10885 // C++11 [temp.explicit]p10: 10886 // [ Note: The intent is that an inline function that is the subject of 10887 // an explicit instantiation declaration will still be implicitly 10888 // instantiated when used so that the body can be considered for 10889 // inlining, but that no out-of-line copy of the inline function would be 10890 // generated in the translation unit. -- end note ] 10891 case TSK_ExplicitInstantiationDeclaration: 10892 return GVA_AvailableExternally; 10893 10894 case TSK_ImplicitInstantiation: 10895 External = GVA_DiscardableODR; 10896 break; 10897 } 10898 10899 if (!FD->isInlined()) 10900 return External; 10901 10902 if ((!Context.getLangOpts().CPlusPlus && 10903 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 10904 !FD->hasAttr<DLLExportAttr>()) || 10905 FD->hasAttr<GNUInlineAttr>()) { 10906 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 10907 10908 // GNU or C99 inline semantics. Determine whether this symbol should be 10909 // externally visible. 10910 if (FD->isInlineDefinitionExternallyVisible()) 10911 return External; 10912 10913 // C99 inline semantics, where the symbol is not externally visible. 10914 return GVA_AvailableExternally; 10915 } 10916 10917 // Functions specified with extern and inline in -fms-compatibility mode 10918 // forcibly get emitted. While the body of the function cannot be later 10919 // replaced, the function definition cannot be discarded. 10920 if (FD->isMSExternInline()) 10921 return GVA_StrongODR; 10922 10923 return GVA_DiscardableODR; 10924 } 10925 10926 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 10927 const Decl *D, GVALinkage L) { 10928 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 10929 // dllexport/dllimport on inline functions. 10930 if (D->hasAttr<DLLImportAttr>()) { 10931 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 10932 return GVA_AvailableExternally; 10933 } else if (D->hasAttr<DLLExportAttr>()) { 10934 if (L == GVA_DiscardableODR) 10935 return GVA_StrongODR; 10936 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 10937 // Device-side functions with __global__ attribute must always be 10938 // visible externally so they can be launched from host. 10939 if (D->hasAttr<CUDAGlobalAttr>() && 10940 (L == GVA_DiscardableODR || L == GVA_Internal)) 10941 return GVA_StrongODR; 10942 // Single source offloading languages like CUDA/HIP need to be able to 10943 // access static device variables from host code of the same compilation 10944 // unit. This is done by externalizing the static variable with a shared 10945 // name between the host and device compilation which is the same for the 10946 // same compilation unit whereas different among different compilation 10947 // units. 10948 if (Context.shouldExternalizeStaticVar(D)) 10949 return GVA_StrongExternal; 10950 } 10951 return L; 10952 } 10953 10954 /// Adjust the GVALinkage for a declaration based on what an external AST source 10955 /// knows about whether there can be other definitions of this declaration. 
/// Adjust the GVALinkage for a declaration based on what an external AST source
/// knows about whether there can be other definitions of this declaration.
static GVALinkage
adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
                                          GVALinkage L) {
  ExternalASTSource *Source = Ctx.getExternalSource();
  if (!Source)
    return L;

  switch (Source->hasExternalDefinitions(D)) {
  case ExternalASTSource::EK_Never:
    // Other translation units rely on us to provide the definition.
    if (L == GVA_DiscardableODR)
      return GVA_StrongODR;
    break;

  case ExternalASTSource::EK_Always:
    return GVA_AvailableExternally;

  case ExternalASTSource::EK_ReplyHazy:
    break;
  }
  return L;
}

GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
  return adjustGVALinkageForExternalDefinitionKind(*this, FD,
           adjustGVALinkageForAttributes(*this, FD,
             basicGVALinkageForFunction(*this, FD)));
}
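// Illustrative note (not in the original source): CodeGen later maps these
// GVALinkage values onto LLVM IR linkage roughly as follows, ignoring the
// additional special cases CodeGenModule applies on top:
//
//   GVA_Internal            -> internal
//   GVA_AvailableExternally -> available_externally
//   GVA_DiscardableODR      -> linkonce_odr
//   GVA_StrongODR           -> weak_odr
//   GVA_StrongExternal      -> external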
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}

GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
  return adjustGVALinkageForExternalDefinitionKind(*this, VD,
           adjustGVALinkageForAttributes(*this, VD,
             basicGVALinkageForVariable(*this, VD)));
}
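// Illustrative sketch (not in the original source): how a few variable
// declarations land in the classification above.
//
//   static int a = 0;      // not externally visible     -> GVA_Internal
//   int b = 0;             // plain global               -> GVA_StrongExternal
//   inline int c = 0;      // C++17 inline variable      -> GVA_DiscardableODR
//   template <class T> T d{};
//   template int d<int>;   // explicit instantiation def -> GVA_StrongODR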
bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(D))
    return true;
  else if (isa<OMPRequiresDecl>(D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(Linkage);
  }

  const auto *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(*this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
      // We can get a value-dependent initializer during error recovery.
      (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
    for (const auto *BD : DD->bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(BindingVD))
          return true;

  return false;
}

void ASTContext::forEachMultiversionedFunctionVersion(
    const FunctionDecl *FD,
    llvm::function_ref<void(FunctionDecl *)> Pred) const {
  assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
  llvm::SmallDenseSet<const FunctionDecl *, 4> SeenDecls;
  FD = FD->getMostRecentDecl();
  // FIXME: The order of traversal here matters and depends on the order of
  // lookup results, which happens to be (mostly) oldest-to-newest, but we
  // shouldn't rely on that.
  for (auto *CurDecl :
       FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
    FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
    if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
        std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
      SeenDecls.insert(CurFD);
      Pred(CurFD);
    }
  }
}
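// Illustrative sketch (not in the original source): given a function
// multiversioned with the target attribute, e.g.
//
//   __attribute__((target("default"))) int f();
//   __attribute__((target("avx2")))    int f();
//
// a caller holding any one of the declarations can visit every version:
//
//   Ctx.forEachMultiversionedFunctionVersion(FD, [](FunctionDecl *Version) {
//     // each distinct version (default, avx2, ...) is seen exactly once
//   });
//
// `Ctx` and `FD` are placeholders for an ASTContext and one of f's decls.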
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod,
                                                    bool IsBuiltin) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(IsVariadic);

  // Builtins ignore the user-specified default calling convention and use
  // the target's default calling convention instead.
  if (!IsBuiltin) {
    switch (LangOpts.getDefaultCallingConv()) {
    case LangOptions::DCC_None:
      break;
    case LangOptions::DCC_CDecl:
      return CC_C;
    case LangOptions::DCC_FastCall:
      if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
        return CC_X86FastCall;
      break;
    case LangOptions::DCC_StdCall:
      if (!IsVariadic)
        return CC_X86StdCall;
      break;
    case LangOptions::DCC_VectorCall:
      // __vectorcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86VectorCall;
      break;
    case LangOptions::DCC_RegCall:
      // __regcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86RegCall;
      break;
    }
  }
  return Target->getDefaultCallingConv();
}

bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}

VTableContextBase *ASTContext::getVTableContext() {
  if (!VTContext.get()) {
    auto ABI = Target->getCXXABI();
    if (ABI.isMicrosoft())
      VTContext.reset(new MicrosoftVTableContext(*this));
    else {
      auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
                                 ? ItaniumVTableContext::Relative
                                 : ItaniumVTableContext::Pointer;
      VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
    }
  }
  return VTContext.get();
}

MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        *this, getDiagnostics(),
        [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
            return RD->getDeviceLambdaManglingNumber();
          return llvm::None;
        });
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

CXXABI::~CXXABI() = default;
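// Illustrative sketch (not in the original source): producing the mangled name
// of a declaration with a context created above. `Ctx` and `ND` are
// placeholders for an ASTContext and a NamedDecl.
//
//   std::unique_ptr<MangleContext> MC(Ctx.createMangleContext());
//   if (MC->shouldMangleDeclName(ND)) {
//     llvm::SmallString<128> Buffer;
//     llvm::raw_svector_ostream Out(Buffer);
//     MC->mangleName(ND, Out);
//     // Buffer now holds e.g. "_Z1fv" under the Itanium ABI.
//   }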
size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes);
}

/// getIntTypeForBitwidth -
/// Returns the integer type with the specified bitwidth and signedness.
/// Returns an empty type if there is no appropriate target type.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
  if (!QualTy && DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy;
}

/// getRealTypeForBitwidth -
/// Returns the floating-point type of the specified bitwidth.
/// Returns an empty type if there is no appropriate target type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    return {};
  }

  llvm_unreachable("Unhandled FloatModeKind value");
}
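// Illustrative sketch (not in the original source): on a typical 64-bit target
// the lookups above resolve along these lines (`Ctx` is a placeholder for an
// ASTContext reference):
//
//   Ctx.getIntTypeForBitwidth(32, /*Signed=*/1);  // -> 'int'
//   Ctx.getIntTypeForBitwidth(64, /*Signed=*/0);  // -> 'unsigned long' or
//                                                 //    'unsigned long long'
//   Ctx.getRealTypeForBitwidth(64, FloatModeKind::NoFloat);  // -> 'double'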
void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number > 1)
    MangleNumbers[ND] = Number;
}

unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
  auto I = MangleNumbers.find(ND);
  return I != MangleNumbers.end() ? I->second : 1;
}

void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number > 1)
    StaticLocalNumbers[VD] = Number;
}

unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
  return I != StaticLocalNumbers.end() ? I->second : 1;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}

const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}

void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}

void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}
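// Illustrative note (not in the original source): both tables above treat 1 as
// the implicit default, so only numbers greater than 1 are ever stored. These
// numbers are what lets the mangler distinguish otherwise identically named
// entities in the same context (for example, several same-named static locals
// or several lambdas within one function) when it needs a discriminator.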
QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArrayType::Normal, /*IndexTypeQuals*/ 0);
}

StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        *this, Key, StringLiteral::Ascii,
        /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
        SourceLocation());
  return Result;
}

MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
  MSGuidDecls.InsertNode(New, InsertPos);
  return New;
}

TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
  TemplateParamObjectDecls.InsertNode(New, InsertPos);
  return New;
}
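// Illustrative note (not in the original source): with C++20 class-type
// non-type template parameters, e.g.
//
//   struct Pair { int x, y; };
//   template <Pair P> struct S {};
//   S<Pair{1, 2}> a;
//   S<Pair{1, 2}> b;   // same TemplateParamObjectDecl as for `a`
//   S<Pair{3, 4}> c;   // a different one
//
// each distinct (type, value) pair is uniqued through the FoldingSet above,
// so `a` and `b` refer to the same template parameter object.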
bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}

bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}

uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
  LangAS AS;
  if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
    AS = LangAS::Default;
  else
    AS = QT->getPointeeType().getAddressSpace();

  return getTargetInfo().getNullPointerValue(AS);
}

unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  if (isTargetAddressSpace(AS))
    return toTargetAddressSpace(AS);
  else
    return (*AddrSpaceMap)[(unsigned)AS];
}

QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
    return SatShortAccumTy;
  case BuiltinType::Accum:
    return SatAccumTy;
  case BuiltinType::LongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::UAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::ULongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return SatShortFractTy;
  case BuiltinType::Fract:
    return SatFractTy;
  case BuiltinType::LongFract:
    return SatLongFractTy;
  case BuiltinType::UShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::UFract:
    return SatUnsignedFractTy;
  case BuiltinType::ULongFract:
    return SatUnsignedLongFractTy;
  }
}

LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
  if (LangOpts.OpenCL)
    return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);

  if (LangOpts.CUDA)
    return getTargetInfo().getCUDABuiltinAddressSpace(AS);

  return getLangASFromTargetAS(AS);
}

// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
        const clang::ASTContext &Ctx, Decl *Value);
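// Illustrative note (not in the original source): the mapping above simply
// pairs each Embedded-C fixed-point type with its saturating counterpart,
// for example
//
//   _Accum                -> _Sat _Accum
//   unsigned short _Fract -> _Sat unsigned short _Fract
//
// and returns already-saturating types unchanged.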
unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}

unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}
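// Illustrative note (not in the original source): with Clang's default
// fixed-point layout, a signed short _Accum is 16 bits wide with a scale of 7,
// leaving 8 integral bits (16 - 7 fractional - 1 sign), and a signed _Accum is
// 32 bits wide with a scale of 15 and 16 integral bits; the exact values are
// target-configurable. All _Fract types carry no integral bits by
// construction, which is why getFixedPointIBits returns 0 for them.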
llvm::FixedPointSemantics
ASTContext::getFixedPointSemantics(QualType Ty) const {
  assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
         "Can only get the fixed point semantics for a "
         "fixed point or integer type.");
  if (Ty->isIntegerType())
    return llvm::FixedPointSemantics::GetIntegerSemantics(
        getIntWidth(Ty), Ty->isSignedIntegerType());

  bool isSigned = Ty->isSignedFixedPointType();
  return llvm::FixedPointSemantics(
      static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
      Ty->isSaturatedFixedPointType(),
      !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
}

llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
}

llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
}

QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}

ParsedTargetAttr
ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
  assert(TD != nullptr);
  ParsedTargetAttr ParsedAttr = TD->parse();

  llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) {
    return !Target->isValidFeatureName(StringRef{Feat}.substr(1));
  });
  return ParsedAttr;
}
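// Illustrative note (not in the original source): for a function declared as
//
//   __attribute__((target("avx2,not-a-real-feature"))) void f();
//
// TD->parse() yields feature strings such as "+avx2" and
// "+not-a-real-feature"; the filter above strips the leading '+'/'-' before
// asking the target whether the name is valid, so the unknown entry is
// dropped and only "+avx2" survives into the feature map built below.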
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       const FunctionDecl *FD) const {
  if (FD)
    getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
  else
    Target->initFeatureMap(FeatureMap, getDiagnostics(),
                           Target->getTargetOpts().CPU,
                           Target->getTargetOpts().Features);
}

// Fills in the supplied string map with the set of target features for the
// passed in function.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    ParsedAttr.Features.insert(
        ParsedAttr.Features.begin(),
        Target->getTargetOpts().FeaturesAsWritten.begin(),
        Target->getTargetOpts().FeaturesAsWritten.end());

    if (ParsedAttr.Architecture != "" &&
        Target->isValidCPUName(ParsedAttr.Architecture))
      TargetCPU = ParsedAttr.Architecture;

    // Now populate the feature map, first with the TargetCPU which is either
    // the default or a new one from the target attribute string. Then we'll
    // use the passed in features (FeaturesAsWritten) along with the new ones
    // from the attribute.
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
    std::vector<std::string> Features;
    StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
    if (VersionStr.startswith("arch="))
      TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
    else if (VersionStr != "default")
      Features.push_back((StringRef{"+"} + VersionStr).str());

    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
  bool IsStaticVar =
      isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: static managed variables need to be externalized since they are
  // emitted as declarations in IR and therefore cannot have internal linkage.
  return IsStaticVar &&
         (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar);
}

bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
  return mayExternalizeStaticVar(D) &&
         (D->hasAttr<HIPManagedAttr>() ||
          CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}

StringRef ASTContext::getCUIDHash() const {
  if (!CUIDHash.empty())
    return CUIDHash;
  if (LangOpts.CUID.empty())
    return StringRef();
  CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
  return CUIDHash;
}