1 //===- ASTContext.cpp - Context to hold long-lived AST nodes --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the ASTContext interface. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "clang/AST/ASTContext.h" 14 #include "CXXABI.h" 15 #include "Interp/Context.h" 16 #include "clang/AST/APValue.h" 17 #include "clang/AST/ASTConcept.h" 18 #include "clang/AST/ASTMutationListener.h" 19 #include "clang/AST/ASTTypeTraits.h" 20 #include "clang/AST/Attr.h" 21 #include "clang/AST/AttrIterator.h" 22 #include "clang/AST/CharUnits.h" 23 #include "clang/AST/Comment.h" 24 #include "clang/AST/Decl.h" 25 #include "clang/AST/DeclBase.h" 26 #include "clang/AST/DeclCXX.h" 27 #include "clang/AST/DeclContextInternals.h" 28 #include "clang/AST/DeclObjC.h" 29 #include "clang/AST/DeclOpenMP.h" 30 #include "clang/AST/DeclTemplate.h" 31 #include "clang/AST/DeclarationName.h" 32 #include "clang/AST/DependenceFlags.h" 33 #include "clang/AST/Expr.h" 34 #include "clang/AST/ExprCXX.h" 35 #include "clang/AST/ExprConcepts.h" 36 #include "clang/AST/ExternalASTSource.h" 37 #include "clang/AST/Mangle.h" 38 #include "clang/AST/MangleNumberingContext.h" 39 #include "clang/AST/NestedNameSpecifier.h" 40 #include "clang/AST/ParentMapContext.h" 41 #include "clang/AST/RawCommentList.h" 42 #include "clang/AST/RecordLayout.h" 43 #include "clang/AST/Stmt.h" 44 #include "clang/AST/TemplateBase.h" 45 #include "clang/AST/TemplateName.h" 46 #include "clang/AST/Type.h" 47 #include "clang/AST/TypeLoc.h" 48 #include "clang/AST/UnresolvedSet.h" 49 #include "clang/AST/VTableBuilder.h" 50 #include "clang/Basic/AddressSpaces.h" 51 #include "clang/Basic/Builtins.h" 52 #include "clang/Basic/CommentOptions.h" 53 #include "clang/Basic/ExceptionSpecificationType.h" 54 #include "clang/Basic/IdentifierTable.h" 55 #include "clang/Basic/LLVM.h" 56 #include "clang/Basic/LangOptions.h" 57 #include "clang/Basic/Linkage.h" 58 #include "clang/Basic/Module.h" 59 #include "clang/Basic/NoSanitizeList.h" 60 #include "clang/Basic/ObjCRuntime.h" 61 #include "clang/Basic/SourceLocation.h" 62 #include "clang/Basic/SourceManager.h" 63 #include "clang/Basic/Specifiers.h" 64 #include "clang/Basic/TargetCXXABI.h" 65 #include "clang/Basic/TargetInfo.h" 66 #include "clang/Basic/XRayLists.h" 67 #include "llvm/ADT/APFixedPoint.h" 68 #include "llvm/ADT/APInt.h" 69 #include "llvm/ADT/APSInt.h" 70 #include "llvm/ADT/ArrayRef.h" 71 #include "llvm/ADT/DenseMap.h" 72 #include "llvm/ADT/DenseSet.h" 73 #include "llvm/ADT/FoldingSet.h" 74 #include "llvm/ADT/None.h" 75 #include "llvm/ADT/Optional.h" 76 #include "llvm/ADT/PointerUnion.h" 77 #include "llvm/ADT/STLExtras.h" 78 #include "llvm/ADT/SmallPtrSet.h" 79 #include "llvm/ADT/SmallVector.h" 80 #include "llvm/ADT/StringExtras.h" 81 #include "llvm/ADT/StringRef.h" 82 #include "llvm/ADT/Triple.h" 83 #include "llvm/Support/Capacity.h" 84 #include "llvm/Support/Casting.h" 85 #include "llvm/Support/Compiler.h" 86 #include "llvm/Support/ErrorHandling.h" 87 #include "llvm/Support/MD5.h" 88 #include "llvm/Support/MathExtras.h" 89 #include "llvm/Support/raw_ostream.h" 90 #include <algorithm> 91 #include <cassert> 92 #include <cstddef> 93 #include <cstdint> 94 #include <cstdlib> 95 #include 
<map> 96 #include <memory> 97 #include <string> 98 #include <tuple> 99 #include <utility> 100 101 using namespace clang; 102 103 enum FloatingRank { 104 BFloat16Rank, 105 Float16Rank, 106 HalfRank, 107 FloatRank, 108 DoubleRank, 109 LongDoubleRank, 110 Float128Rank, 111 Ibm128Rank 112 }; 113 114 /// \returns location that is relevant when searching for Doc comments related 115 /// to \p D. 116 static SourceLocation getDeclLocForCommentSearch(const Decl *D, 117 SourceManager &SourceMgr) { 118 assert(D); 119 120 // User can not attach documentation to implicit declarations. 121 if (D->isImplicit()) 122 return {}; 123 124 // User can not attach documentation to implicit instantiations. 125 if (const auto *FD = dyn_cast<FunctionDecl>(D)) { 126 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 127 return {}; 128 } 129 130 if (const auto *VD = dyn_cast<VarDecl>(D)) { 131 if (VD->isStaticDataMember() && 132 VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 133 return {}; 134 } 135 136 if (const auto *CRD = dyn_cast<CXXRecordDecl>(D)) { 137 if (CRD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 138 return {}; 139 } 140 141 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) { 142 TemplateSpecializationKind TSK = CTSD->getSpecializationKind(); 143 if (TSK == TSK_ImplicitInstantiation || 144 TSK == TSK_Undeclared) 145 return {}; 146 } 147 148 if (const auto *ED = dyn_cast<EnumDecl>(D)) { 149 if (ED->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) 150 return {}; 151 } 152 if (const auto *TD = dyn_cast<TagDecl>(D)) { 153 // When tag declaration (but not definition!) is part of the 154 // decl-specifier-seq of some other declaration, it doesn't get comment 155 if (TD->isEmbeddedInDeclarator() && !TD->isCompleteDefinition()) 156 return {}; 157 } 158 // TODO: handle comments for function parameters properly. 159 if (isa<ParmVarDecl>(D)) 160 return {}; 161 162 // TODO: we could look up template parameter documentation in the template 163 // documentation. 164 if (isa<TemplateTypeParmDecl>(D) || 165 isa<NonTypeTemplateParmDecl>(D) || 166 isa<TemplateTemplateParmDecl>(D)) 167 return {}; 168 169 // Find declaration location. 170 // For Objective-C declarations we generally don't expect to have multiple 171 // declarators, thus use declaration starting location as the "declaration 172 // location". 173 // For all other declarations multiple declarators are used quite frequently, 174 // so we use the location of the identifier as the "declaration location". 175 if (isa<ObjCMethodDecl>(D) || isa<ObjCContainerDecl>(D) || 176 isa<ObjCPropertyDecl>(D) || 177 isa<RedeclarableTemplateDecl>(D) || 178 isa<ClassTemplateSpecializationDecl>(D) || 179 // Allow association with Y across {} in `typedef struct X {} Y`. 180 isa<TypedefDecl>(D)) 181 return D->getBeginLoc(); 182 183 const SourceLocation DeclLoc = D->getLocation(); 184 if (DeclLoc.isMacroID()) { 185 if (isa<TypedefDecl>(D)) { 186 // If location of the typedef name is in a macro, it is because being 187 // declared via a macro. Try using declaration's starting location as 188 // the "declaration location". 189 return D->getBeginLoc(); 190 } 191 192 if (const auto *TD = dyn_cast<TagDecl>(D)) { 193 // If location of the tag decl is inside a macro, but the spelling of 194 // the tag name comes from a macro argument, it looks like a special 195 // macro like NS_ENUM is being used to define the tag decl. 
In that 196 // case, adjust the source location to the expansion loc so that we can 197 // attach the comment to the tag decl. 198 if (SourceMgr.isMacroArgExpansion(DeclLoc) && TD->isCompleteDefinition()) 199 return SourceMgr.getExpansionLoc(DeclLoc); 200 } 201 } 202 203 return DeclLoc; 204 } 205 206 RawComment *ASTContext::getRawCommentForDeclNoCacheImpl( 207 const Decl *D, const SourceLocation RepresentativeLocForDecl, 208 const std::map<unsigned, RawComment *> &CommentsInTheFile) const { 209 // If the declaration doesn't map directly to a location in a file, we 210 // can't find the comment. 211 if (RepresentativeLocForDecl.isInvalid() || 212 !RepresentativeLocForDecl.isFileID()) 213 return nullptr; 214 215 // If there are no comments anywhere, we won't find anything. 216 if (CommentsInTheFile.empty()) 217 return nullptr; 218 219 // Decompose the location for the declaration and find the beginning of the 220 // file buffer. 221 const std::pair<FileID, unsigned> DeclLocDecomp = 222 SourceMgr.getDecomposedLoc(RepresentativeLocForDecl); 223 224 // Slow path. 225 auto OffsetCommentBehindDecl = 226 CommentsInTheFile.lower_bound(DeclLocDecomp.second); 227 228 // First check whether we have a trailing comment. 229 if (OffsetCommentBehindDecl != CommentsInTheFile.end()) { 230 RawComment *CommentBehindDecl = OffsetCommentBehindDecl->second; 231 if ((CommentBehindDecl->isDocumentation() || 232 LangOpts.CommentOpts.ParseAllComments) && 233 CommentBehindDecl->isTrailingComment() && 234 (isa<FieldDecl>(D) || isa<EnumConstantDecl>(D) || isa<VarDecl>(D) || 235 isa<ObjCMethodDecl>(D) || isa<ObjCPropertyDecl>(D))) { 236 237 // Check that Doxygen trailing comment comes after the declaration, starts 238 // on the same line and in the same file as the declaration. 239 if (SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) == 240 Comments.getCommentBeginLine(CommentBehindDecl, DeclLocDecomp.first, 241 OffsetCommentBehindDecl->first)) { 242 return CommentBehindDecl; 243 } 244 } 245 } 246 247 // The comment just after the declaration was not a trailing comment. 248 // Let's look at the previous comment. 249 if (OffsetCommentBehindDecl == CommentsInTheFile.begin()) 250 return nullptr; 251 252 auto OffsetCommentBeforeDecl = --OffsetCommentBehindDecl; 253 RawComment *CommentBeforeDecl = OffsetCommentBeforeDecl->second; 254 255 // Check that we actually have a non-member Doxygen comment. 256 if (!(CommentBeforeDecl->isDocumentation() || 257 LangOpts.CommentOpts.ParseAllComments) || 258 CommentBeforeDecl->isTrailingComment()) 259 return nullptr; 260 261 // Decompose the end of the comment. 262 const unsigned CommentEndOffset = 263 Comments.getCommentEndOffset(CommentBeforeDecl); 264 265 // Get the corresponding buffer. 266 bool Invalid = false; 267 const char *Buffer = SourceMgr.getBufferData(DeclLocDecomp.first, 268 &Invalid).data(); 269 if (Invalid) 270 return nullptr; 271 272 // Extract text between the comment and declaration. 273 StringRef Text(Buffer + CommentEndOffset, 274 DeclLocDecomp.second - CommentEndOffset); 275 276 // There should be no other declarations or preprocessor directives between 277 // comment and declaration. 
278 if (Text.find_first_of(";{}#@") != StringRef::npos) 279 return nullptr; 280 281 return CommentBeforeDecl; 282 } 283 284 RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { 285 const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); 286 287 // If the declaration doesn't map directly to a location in a file, we 288 // can't find the comment. 289 if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) 290 return nullptr; 291 292 if (ExternalSource && !CommentsLoaded) { 293 ExternalSource->ReadComments(); 294 CommentsLoaded = true; 295 } 296 297 if (Comments.empty()) 298 return nullptr; 299 300 const FileID File = SourceMgr.getDecomposedLoc(DeclLoc).first; 301 const auto CommentsInThisFile = Comments.getCommentsInFile(File); 302 if (!CommentsInThisFile || CommentsInThisFile->empty()) 303 return nullptr; 304 305 return getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile); 306 } 307 308 void ASTContext::addComment(const RawComment &RC) { 309 assert(LangOpts.RetainCommentsFromSystemHeaders || 310 !SourceMgr.isInSystemHeader(RC.getSourceRange().getBegin())); 311 Comments.addComment(RC, LangOpts.CommentOpts, BumpAlloc); 312 } 313 314 /// If we have a 'templated' declaration for a template, adjust 'D' to 315 /// refer to the actual template. 316 /// If we have an implicit instantiation, adjust 'D' to refer to template. 317 static const Decl &adjustDeclToTemplate(const Decl &D) { 318 if (const auto *FD = dyn_cast<FunctionDecl>(&D)) { 319 // Is this function declaration part of a function template? 320 if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) 321 return *FTD; 322 323 // Nothing to do if function is not an implicit instantiation. 324 if (FD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) 325 return D; 326 327 // Function is an implicit instantiation of a function template? 328 if (const FunctionTemplateDecl *FTD = FD->getPrimaryTemplate()) 329 return *FTD; 330 331 // Function is instantiated from a member definition of a class template? 332 if (const FunctionDecl *MemberDecl = 333 FD->getInstantiatedFromMemberFunction()) 334 return *MemberDecl; 335 336 return D; 337 } 338 if (const auto *VD = dyn_cast<VarDecl>(&D)) { 339 // Static data member is instantiated from a member definition of a class 340 // template? 341 if (VD->isStaticDataMember()) 342 if (const VarDecl *MemberDecl = VD->getInstantiatedFromStaticDataMember()) 343 return *MemberDecl; 344 345 return D; 346 } 347 if (const auto *CRD = dyn_cast<CXXRecordDecl>(&D)) { 348 // Is this class declaration part of a class template? 349 if (const ClassTemplateDecl *CTD = CRD->getDescribedClassTemplate()) 350 return *CTD; 351 352 // Class is an implicit instantiation of a class template or partial 353 // specialization? 354 if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(CRD)) { 355 if (CTSD->getSpecializationKind() != TSK_ImplicitInstantiation) 356 return D; 357 llvm::PointerUnion<ClassTemplateDecl *, 358 ClassTemplatePartialSpecializationDecl *> 359 PU = CTSD->getSpecializedTemplateOrPartial(); 360 return PU.is<ClassTemplateDecl *>() 361 ? *static_cast<const Decl *>(PU.get<ClassTemplateDecl *>()) 362 : *static_cast<const Decl *>( 363 PU.get<ClassTemplatePartialSpecializationDecl *>()); 364 } 365 366 // Class is instantiated from a member definition of a class template? 
367 if (const MemberSpecializationInfo *Info = 368 CRD->getMemberSpecializationInfo()) 369 return *Info->getInstantiatedFrom(); 370 371 return D; 372 } 373 if (const auto *ED = dyn_cast<EnumDecl>(&D)) { 374 // Enum is instantiated from a member definition of a class template? 375 if (const EnumDecl *MemberDecl = ED->getInstantiatedFromMemberEnum()) 376 return *MemberDecl; 377 378 return D; 379 } 380 // FIXME: Adjust alias templates? 381 return D; 382 } 383 384 const RawComment *ASTContext::getRawCommentForAnyRedecl( 385 const Decl *D, 386 const Decl **OriginalDecl) const { 387 if (!D) { 388 if (OriginalDecl) 389 OriginalDecl = nullptr; 390 return nullptr; 391 } 392 393 D = &adjustDeclToTemplate(*D); 394 395 // Any comment directly attached to D? 396 { 397 auto DeclComment = DeclRawComments.find(D); 398 if (DeclComment != DeclRawComments.end()) { 399 if (OriginalDecl) 400 *OriginalDecl = D; 401 return DeclComment->second; 402 } 403 } 404 405 // Any comment attached to any redeclaration of D? 406 const Decl *CanonicalD = D->getCanonicalDecl(); 407 if (!CanonicalD) 408 return nullptr; 409 410 { 411 auto RedeclComment = RedeclChainComments.find(CanonicalD); 412 if (RedeclComment != RedeclChainComments.end()) { 413 if (OriginalDecl) 414 *OriginalDecl = RedeclComment->second; 415 auto CommentAtRedecl = DeclRawComments.find(RedeclComment->second); 416 assert(CommentAtRedecl != DeclRawComments.end() && 417 "This decl is supposed to have comment attached."); 418 return CommentAtRedecl->second; 419 } 420 } 421 422 // Any redeclarations of D that we haven't checked for comments yet? 423 // We can't use DenseMap::iterator directly since it'd get invalid. 424 auto LastCheckedRedecl = [this, CanonicalD]() -> const Decl * { 425 auto LookupRes = CommentlessRedeclChains.find(CanonicalD); 426 if (LookupRes != CommentlessRedeclChains.end()) 427 return LookupRes->second; 428 return nullptr; 429 }(); 430 431 for (const auto Redecl : D->redecls()) { 432 assert(Redecl); 433 // Skip all redeclarations that have been checked previously. 434 if (LastCheckedRedecl) { 435 if (LastCheckedRedecl == Redecl) { 436 LastCheckedRedecl = nullptr; 437 } 438 continue; 439 } 440 const RawComment *RedeclComment = getRawCommentForDeclNoCache(Redecl); 441 if (RedeclComment) { 442 cacheRawCommentForDecl(*Redecl, *RedeclComment); 443 if (OriginalDecl) 444 *OriginalDecl = Redecl; 445 return RedeclComment; 446 } 447 CommentlessRedeclChains[CanonicalD] = Redecl; 448 } 449 450 if (OriginalDecl) 451 *OriginalDecl = nullptr; 452 return nullptr; 453 } 454 455 void ASTContext::cacheRawCommentForDecl(const Decl &OriginalD, 456 const RawComment &Comment) const { 457 assert(Comment.isDocumentation() || LangOpts.CommentOpts.ParseAllComments); 458 DeclRawComments.try_emplace(&OriginalD, &Comment); 459 const Decl *const CanonicalDecl = OriginalD.getCanonicalDecl(); 460 RedeclChainComments.try_emplace(CanonicalDecl, &OriginalD); 461 CommentlessRedeclChains.erase(CanonicalDecl); 462 } 463 464 static void addRedeclaredMethods(const ObjCMethodDecl *ObjCMethod, 465 SmallVectorImpl<const NamedDecl *> &Redeclared) { 466 const DeclContext *DC = ObjCMethod->getDeclContext(); 467 if (const auto *IMD = dyn_cast<ObjCImplDecl>(DC)) { 468 const ObjCInterfaceDecl *ID = IMD->getClassInterface(); 469 if (!ID) 470 return; 471 // Add redeclared method here. 
472 for (const auto *Ext : ID->known_extensions()) { 473 if (ObjCMethodDecl *RedeclaredMethod = 474 Ext->getMethod(ObjCMethod->getSelector(), 475 ObjCMethod->isInstanceMethod())) 476 Redeclared.push_back(RedeclaredMethod); 477 } 478 } 479 } 480 481 void ASTContext::attachCommentsToJustParsedDecls(ArrayRef<Decl *> Decls, 482 const Preprocessor *PP) { 483 if (Comments.empty() || Decls.empty()) 484 return; 485 486 FileID File; 487 for (Decl *D : Decls) { 488 SourceLocation Loc = D->getLocation(); 489 if (Loc.isValid()) { 490 // See if there are any new comments that are not attached to a decl. 491 // The location doesn't have to be precise - we care only about the file. 492 File = SourceMgr.getDecomposedLoc(Loc).first; 493 break; 494 } 495 } 496 497 if (File.isInvalid()) 498 return; 499 500 auto CommentsInThisFile = Comments.getCommentsInFile(File); 501 if (!CommentsInThisFile || CommentsInThisFile->empty() || 502 CommentsInThisFile->rbegin()->second->isAttached()) 503 return; 504 505 // There is at least one comment not attached to a decl. 506 // Maybe it should be attached to one of Decls? 507 // 508 // Note that this way we pick up not only comments that precede the 509 // declaration, but also comments that *follow* the declaration -- thanks to 510 // the lookahead in the lexer: we've consumed the semicolon and looked 511 // ahead through comments. 512 513 for (const Decl *D : Decls) { 514 assert(D); 515 if (D->isInvalidDecl()) 516 continue; 517 518 D = &adjustDeclToTemplate(*D); 519 520 const SourceLocation DeclLoc = getDeclLocForCommentSearch(D, SourceMgr); 521 522 if (DeclLoc.isInvalid() || !DeclLoc.isFileID()) 523 continue; 524 525 if (DeclRawComments.count(D) > 0) 526 continue; 527 528 if (RawComment *const DocComment = 529 getRawCommentForDeclNoCacheImpl(D, DeclLoc, *CommentsInThisFile)) { 530 cacheRawCommentForDecl(*D, *DocComment); 531 comments::FullComment *FC = DocComment->parse(*this, PP, D); 532 ParsedComments[D->getCanonicalDecl()] = FC; 533 } 534 } 535 } 536 537 comments::FullComment *ASTContext::cloneFullComment(comments::FullComment *FC, 538 const Decl *D) const { 539 auto *ThisDeclInfo = new (*this) comments::DeclInfo; 540 ThisDeclInfo->CommentDecl = D; 541 ThisDeclInfo->IsFilled = false; 542 ThisDeclInfo->fill(); 543 ThisDeclInfo->CommentDecl = FC->getDecl(); 544 if (!ThisDeclInfo->TemplateParameters) 545 ThisDeclInfo->TemplateParameters = FC->getDeclInfo()->TemplateParameters; 546 comments::FullComment *CFC = 547 new (*this) comments::FullComment(FC->getBlocks(), 548 ThisDeclInfo); 549 return CFC; 550 } 551 552 comments::FullComment *ASTContext::getLocalCommentForDeclUncached(const Decl *D) const { 553 const RawComment *RC = getRawCommentForDeclNoCache(D); 554 return RC ? 
RC->parse(*this, nullptr, D) : nullptr; 555 } 556 557 comments::FullComment *ASTContext::getCommentForDecl( 558 const Decl *D, 559 const Preprocessor *PP) const { 560 if (!D || D->isInvalidDecl()) 561 return nullptr; 562 D = &adjustDeclToTemplate(*D); 563 564 const Decl *Canonical = D->getCanonicalDecl(); 565 llvm::DenseMap<const Decl *, comments::FullComment *>::iterator Pos = 566 ParsedComments.find(Canonical); 567 568 if (Pos != ParsedComments.end()) { 569 if (Canonical != D) { 570 comments::FullComment *FC = Pos->second; 571 comments::FullComment *CFC = cloneFullComment(FC, D); 572 return CFC; 573 } 574 return Pos->second; 575 } 576 577 const Decl *OriginalDecl = nullptr; 578 579 const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl); 580 if (!RC) { 581 if (isa<ObjCMethodDecl>(D) || isa<FunctionDecl>(D)) { 582 SmallVector<const NamedDecl*, 8> Overridden; 583 const auto *OMD = dyn_cast<ObjCMethodDecl>(D); 584 if (OMD && OMD->isPropertyAccessor()) 585 if (const ObjCPropertyDecl *PDecl = OMD->findPropertyDecl()) 586 if (comments::FullComment *FC = getCommentForDecl(PDecl, PP)) 587 return cloneFullComment(FC, D); 588 if (OMD) 589 addRedeclaredMethods(OMD, Overridden); 590 getOverriddenMethods(dyn_cast<NamedDecl>(D), Overridden); 591 for (unsigned i = 0, e = Overridden.size(); i < e; i++) 592 if (comments::FullComment *FC = getCommentForDecl(Overridden[i], PP)) 593 return cloneFullComment(FC, D); 594 } 595 else if (const auto *TD = dyn_cast<TypedefNameDecl>(D)) { 596 // Attach any tag type's documentation to its typedef if latter 597 // does not have one of its own. 598 QualType QT = TD->getUnderlyingType(); 599 if (const auto *TT = QT->getAs<TagType>()) 600 if (const Decl *TD = TT->getDecl()) 601 if (comments::FullComment *FC = getCommentForDecl(TD, PP)) 602 return cloneFullComment(FC, D); 603 } 604 else if (const auto *IC = dyn_cast<ObjCInterfaceDecl>(D)) { 605 while (IC->getSuperClass()) { 606 IC = IC->getSuperClass(); 607 if (comments::FullComment *FC = getCommentForDecl(IC, PP)) 608 return cloneFullComment(FC, D); 609 } 610 } 611 else if (const auto *CD = dyn_cast<ObjCCategoryDecl>(D)) { 612 if (const ObjCInterfaceDecl *IC = CD->getClassInterface()) 613 if (comments::FullComment *FC = getCommentForDecl(IC, PP)) 614 return cloneFullComment(FC, D); 615 } 616 else if (const auto *RD = dyn_cast<CXXRecordDecl>(D)) { 617 if (!(RD = RD->getDefinition())) 618 return nullptr; 619 // Check non-virtual bases. 620 for (const auto &I : RD->bases()) { 621 if (I.isVirtual() || (I.getAccessSpecifier() != AS_public)) 622 continue; 623 QualType Ty = I.getType(); 624 if (Ty.isNull()) 625 continue; 626 if (const CXXRecordDecl *NonVirtualBase = Ty->getAsCXXRecordDecl()) { 627 if (!(NonVirtualBase= NonVirtualBase->getDefinition())) 628 continue; 629 630 if (comments::FullComment *FC = getCommentForDecl((NonVirtualBase), PP)) 631 return cloneFullComment(FC, D); 632 } 633 } 634 // Check virtual bases. 635 for (const auto &I : RD->vbases()) { 636 if (I.getAccessSpecifier() != AS_public) 637 continue; 638 QualType Ty = I.getType(); 639 if (Ty.isNull()) 640 continue; 641 if (const CXXRecordDecl *VirtualBase = Ty->getAsCXXRecordDecl()) { 642 if (!(VirtualBase= VirtualBase->getDefinition())) 643 continue; 644 if (comments::FullComment *FC = getCommentForDecl((VirtualBase), PP)) 645 return cloneFullComment(FC, D); 646 } 647 } 648 } 649 return nullptr; 650 } 651 652 // If the RawComment was attached to other redeclaration of this Decl, we 653 // should parse the comment in context of that other Decl. 
This is important 654 // because comments can contain references to parameter names which can be 655 // different across redeclarations. 656 if (D != OriginalDecl && OriginalDecl) 657 return getCommentForDecl(OriginalDecl, PP); 658 659 comments::FullComment *FC = RC->parse(*this, PP, D); 660 ParsedComments[Canonical] = FC; 661 return FC; 662 } 663 664 void 665 ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID, 666 const ASTContext &C, 667 TemplateTemplateParmDecl *Parm) { 668 ID.AddInteger(Parm->getDepth()); 669 ID.AddInteger(Parm->getPosition()); 670 ID.AddBoolean(Parm->isParameterPack()); 671 672 TemplateParameterList *Params = Parm->getTemplateParameters(); 673 ID.AddInteger(Params->size()); 674 for (TemplateParameterList::const_iterator P = Params->begin(), 675 PEnd = Params->end(); 676 P != PEnd; ++P) { 677 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { 678 ID.AddInteger(0); 679 ID.AddBoolean(TTP->isParameterPack()); 680 const TypeConstraint *TC = TTP->getTypeConstraint(); 681 ID.AddBoolean(TC != nullptr); 682 if (TC) 683 TC->getImmediatelyDeclaredConstraint()->Profile(ID, C, 684 /*Canonical=*/true); 685 if (TTP->isExpandedParameterPack()) { 686 ID.AddBoolean(true); 687 ID.AddInteger(TTP->getNumExpansionParameters()); 688 } else 689 ID.AddBoolean(false); 690 continue; 691 } 692 693 if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { 694 ID.AddInteger(1); 695 ID.AddBoolean(NTTP->isParameterPack()); 696 ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr()); 697 if (NTTP->isExpandedParameterPack()) { 698 ID.AddBoolean(true); 699 ID.AddInteger(NTTP->getNumExpansionTypes()); 700 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { 701 QualType T = NTTP->getExpansionType(I); 702 ID.AddPointer(T.getCanonicalType().getAsOpaquePtr()); 703 } 704 } else 705 ID.AddBoolean(false); 706 continue; 707 } 708 709 auto *TTP = cast<TemplateTemplateParmDecl>(*P); 710 ID.AddInteger(2); 711 Profile(ID, C, TTP); 712 } 713 Expr *RequiresClause = Parm->getTemplateParameters()->getRequiresClause(); 714 ID.AddBoolean(RequiresClause != nullptr); 715 if (RequiresClause) 716 RequiresClause->Profile(ID, C, /*Canonical=*/true); 717 } 718 719 static Expr * 720 canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC, 721 QualType ConstrainedType) { 722 // This is a bit ugly - we need to form a new immediately-declared 723 // constraint that references the new parameter; this would ideally 724 // require semantic analysis (e.g. template<C T> struct S {}; - the 725 // converted arguments of C<T> could be an argument pack if C is 726 // declared as template<typename... T> concept C = ...). 727 // We don't have semantic analysis here so we dig deep into the 728 // ready-made constraint expr and change the thing manually. 729 ConceptSpecializationExpr *CSE; 730 if (const auto *Fold = dyn_cast<CXXFoldExpr>(IDC)) 731 CSE = cast<ConceptSpecializationExpr>(Fold->getLHS()); 732 else 733 CSE = cast<ConceptSpecializationExpr>(IDC); 734 ArrayRef<TemplateArgument> OldConverted = CSE->getTemplateArguments(); 735 SmallVector<TemplateArgument, 3> NewConverted; 736 NewConverted.reserve(OldConverted.size()); 737 if (OldConverted.front().getKind() == TemplateArgument::Pack) { 738 // The case: 739 // template<typename... 
T> concept C = true; 740 // template<C<int> T> struct S; -> constraint is C<{T, int}> 741 NewConverted.push_back(ConstrainedType); 742 for (auto &Arg : OldConverted.front().pack_elements().drop_front(1)) 743 NewConverted.push_back(Arg); 744 TemplateArgument NewPack(NewConverted); 745 746 NewConverted.clear(); 747 NewConverted.push_back(NewPack); 748 assert(OldConverted.size() == 1 && 749 "Template parameter pack should be the last parameter"); 750 } else { 751 assert(OldConverted.front().getKind() == TemplateArgument::Type && 752 "Unexpected first argument kind for immediately-declared " 753 "constraint"); 754 NewConverted.push_back(ConstrainedType); 755 for (auto &Arg : OldConverted.drop_front(1)) 756 NewConverted.push_back(Arg); 757 } 758 Expr *NewIDC = ConceptSpecializationExpr::Create( 759 C, CSE->getNamedConcept(), NewConverted, nullptr, 760 CSE->isInstantiationDependent(), CSE->containsUnexpandedParameterPack()); 761 762 if (auto *OrigFold = dyn_cast<CXXFoldExpr>(IDC)) 763 NewIDC = new (C) CXXFoldExpr( 764 OrigFold->getType(), /*Callee*/nullptr, SourceLocation(), NewIDC, 765 BinaryOperatorKind::BO_LAnd, SourceLocation(), /*RHS=*/nullptr, 766 SourceLocation(), /*NumExpansions=*/None); 767 return NewIDC; 768 } 769 770 TemplateTemplateParmDecl * 771 ASTContext::getCanonicalTemplateTemplateParmDecl( 772 TemplateTemplateParmDecl *TTP) const { 773 // Check if we already have a canonical template template parameter. 774 llvm::FoldingSetNodeID ID; 775 CanonicalTemplateTemplateParm::Profile(ID, *this, TTP); 776 void *InsertPos = nullptr; 777 CanonicalTemplateTemplateParm *Canonical 778 = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); 779 if (Canonical) 780 return Canonical->getParam(); 781 782 // Build a canonical template parameter list. 783 TemplateParameterList *Params = TTP->getTemplateParameters(); 784 SmallVector<NamedDecl *, 4> CanonParams; 785 CanonParams.reserve(Params->size()); 786 for (TemplateParameterList::const_iterator P = Params->begin(), 787 PEnd = Params->end(); 788 P != PEnd; ++P) { 789 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) { 790 TemplateTypeParmDecl *NewTTP = TemplateTypeParmDecl::Create(*this, 791 getTranslationUnitDecl(), SourceLocation(), SourceLocation(), 792 TTP->getDepth(), TTP->getIndex(), nullptr, false, 793 TTP->isParameterPack(), TTP->hasTypeConstraint(), 794 TTP->isExpandedParameterPack() ? 
795 llvm::Optional<unsigned>(TTP->getNumExpansionParameters()) : None); 796 if (const auto *TC = TTP->getTypeConstraint()) { 797 QualType ParamAsArgument(NewTTP->getTypeForDecl(), 0); 798 Expr *NewIDC = canonicalizeImmediatelyDeclaredConstraint( 799 *this, TC->getImmediatelyDeclaredConstraint(), 800 ParamAsArgument); 801 TemplateArgumentListInfo CanonArgsAsWritten; 802 if (auto *Args = TC->getTemplateArgsAsWritten()) 803 for (const auto &ArgLoc : Args->arguments()) 804 CanonArgsAsWritten.addArgument( 805 TemplateArgumentLoc(ArgLoc.getArgument(), 806 TemplateArgumentLocInfo())); 807 NewTTP->setTypeConstraint( 808 NestedNameSpecifierLoc(), 809 DeclarationNameInfo(TC->getNamedConcept()->getDeclName(), 810 SourceLocation()), /*FoundDecl=*/nullptr, 811 // Actually canonicalizing a TemplateArgumentLoc is difficult so we 812 // simply omit the ArgsAsWritten 813 TC->getNamedConcept(), /*ArgsAsWritten=*/nullptr, NewIDC); 814 } 815 CanonParams.push_back(NewTTP); 816 } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) { 817 QualType T = getCanonicalType(NTTP->getType()); 818 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 819 NonTypeTemplateParmDecl *Param; 820 if (NTTP->isExpandedParameterPack()) { 821 SmallVector<QualType, 2> ExpandedTypes; 822 SmallVector<TypeSourceInfo *, 2> ExpandedTInfos; 823 for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) { 824 ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I))); 825 ExpandedTInfos.push_back( 826 getTrivialTypeSourceInfo(ExpandedTypes.back())); 827 } 828 829 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 830 SourceLocation(), 831 SourceLocation(), 832 NTTP->getDepth(), 833 NTTP->getPosition(), nullptr, 834 T, 835 TInfo, 836 ExpandedTypes, 837 ExpandedTInfos); 838 } else { 839 Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 840 SourceLocation(), 841 SourceLocation(), 842 NTTP->getDepth(), 843 NTTP->getPosition(), nullptr, 844 T, 845 NTTP->isParameterPack(), 846 TInfo); 847 } 848 if (AutoType *AT = T->getContainedAutoType()) { 849 if (AT->isConstrained()) { 850 Param->setPlaceholderTypeConstraint( 851 canonicalizeImmediatelyDeclaredConstraint( 852 *this, NTTP->getPlaceholderTypeConstraint(), T)); 853 } 854 } 855 CanonParams.push_back(Param); 856 857 } else 858 CanonParams.push_back(getCanonicalTemplateTemplateParmDecl( 859 cast<TemplateTemplateParmDecl>(*P))); 860 } 861 862 Expr *CanonRequiresClause = nullptr; 863 if (Expr *RequiresClause = TTP->getTemplateParameters()->getRequiresClause()) 864 CanonRequiresClause = RequiresClause; 865 866 TemplateTemplateParmDecl *CanonTTP 867 = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(), 868 SourceLocation(), TTP->getDepth(), 869 TTP->getPosition(), 870 TTP->isParameterPack(), 871 nullptr, 872 TemplateParameterList::Create(*this, SourceLocation(), 873 SourceLocation(), 874 CanonParams, 875 SourceLocation(), 876 CanonRequiresClause)); 877 878 // Get the new insert position for the node we care about. 879 Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos); 880 assert(!Canonical && "Shouldn't be in the map!"); 881 (void)Canonical; 882 883 // Create the canonical template template parameter entry. 
884 Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP); 885 CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos); 886 return CanonTTP; 887 } 888 889 TargetCXXABI::Kind ASTContext::getCXXABIKind() const { 890 auto Kind = getTargetInfo().getCXXABI().getKind(); 891 return getLangOpts().CXXABI.getValueOr(Kind); 892 } 893 894 CXXABI *ASTContext::createCXXABI(const TargetInfo &T) { 895 if (!LangOpts.CPlusPlus) return nullptr; 896 897 switch (getCXXABIKind()) { 898 case TargetCXXABI::AppleARM64: 899 case TargetCXXABI::Fuchsia: 900 case TargetCXXABI::GenericARM: // Same as Itanium at this level 901 case TargetCXXABI::iOS: 902 case TargetCXXABI::WatchOS: 903 case TargetCXXABI::GenericAArch64: 904 case TargetCXXABI::GenericMIPS: 905 case TargetCXXABI::GenericItanium: 906 case TargetCXXABI::WebAssembly: 907 case TargetCXXABI::XL: 908 return CreateItaniumCXXABI(*this); 909 case TargetCXXABI::Microsoft: 910 return CreateMicrosoftCXXABI(*this); 911 } 912 llvm_unreachable("Invalid CXXABI type!"); 913 } 914 915 interp::Context &ASTContext::getInterpContext() { 916 if (!InterpContext) { 917 InterpContext.reset(new interp::Context(*this)); 918 } 919 return *InterpContext.get(); 920 } 921 922 ParentMapContext &ASTContext::getParentMapContext() { 923 if (!ParentMapCtx) 924 ParentMapCtx.reset(new ParentMapContext(*this)); 925 return *ParentMapCtx.get(); 926 } 927 928 static const LangASMap *getAddressSpaceMap(const TargetInfo &T, 929 const LangOptions &LOpts) { 930 if (LOpts.FakeAddressSpaceMap) { 931 // The fake address space map must have a distinct entry for each 932 // language-specific address space. 933 static const unsigned FakeAddrSpaceMap[] = { 934 0, // Default 935 1, // opencl_global 936 3, // opencl_local 937 2, // opencl_constant 938 0, // opencl_private 939 4, // opencl_generic 940 5, // opencl_global_device 941 6, // opencl_global_host 942 7, // cuda_device 943 8, // cuda_constant 944 9, // cuda_shared 945 1, // sycl_global 946 5, // sycl_global_device 947 6, // sycl_global_host 948 3, // sycl_local 949 0, // sycl_private 950 10, // ptr32_sptr 951 11, // ptr32_uptr 952 12 // ptr64 953 }; 954 return &FakeAddrSpaceMap; 955 } else { 956 return &T.getAddressSpaceMap(); 957 } 958 } 959 960 static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI, 961 const LangOptions &LangOpts) { 962 switch (LangOpts.getAddressSpaceMapMangling()) { 963 case LangOptions::ASMM_Target: 964 return TI.useAddressSpaceMapMangling(); 965 case LangOptions::ASMM_On: 966 return true; 967 case LangOptions::ASMM_Off: 968 return false; 969 } 970 llvm_unreachable("getAddressSpaceMapMangling() doesn't cover anything."); 971 } 972 973 ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM, 974 IdentifierTable &idents, SelectorTable &sels, 975 Builtin::Context &builtins, TranslationUnitKind TUKind) 976 : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()), 977 TemplateSpecializationTypes(this_()), 978 DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()), 979 SubstTemplateTemplateParmPacks(this_()), 980 CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts), 981 NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)), 982 XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles, 983 LangOpts.XRayNeverInstrumentFiles, 984 LangOpts.XRayAttrListFiles, SM)), 985 ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)), 986 PrintingPolicy(LOpts), Idents(idents), Selectors(sels), 987 BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this), 988 
Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts), 989 CompCategories(this_()), LastSDM(nullptr, 0) { 990 addTranslationUnitDecl(); 991 } 992 993 void ASTContext::cleanup() { 994 // Release the DenseMaps associated with DeclContext objects. 995 // FIXME: Is this the ideal solution? 996 ReleaseDeclContextMaps(); 997 998 // Call all of the deallocation functions on all of their targets. 999 for (auto &Pair : Deallocations) 1000 (Pair.first)(Pair.second); 1001 Deallocations.clear(); 1002 1003 // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed 1004 // because they can contain DenseMaps. 1005 for (llvm::DenseMap<const ObjCContainerDecl*, 1006 const ASTRecordLayout*>::iterator 1007 I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; ) 1008 // Increment in loop to prevent using deallocated memory. 1009 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second)) 1010 R->Destroy(*this); 1011 ObjCLayouts.clear(); 1012 1013 for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator 1014 I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) { 1015 // Increment in loop to prevent using deallocated memory. 1016 if (auto *R = const_cast<ASTRecordLayout *>((I++)->second)) 1017 R->Destroy(*this); 1018 } 1019 ASTRecordLayouts.clear(); 1020 1021 for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(), 1022 AEnd = DeclAttrs.end(); 1023 A != AEnd; ++A) 1024 A->second->~AttrVec(); 1025 DeclAttrs.clear(); 1026 1027 for (const auto &Value : ModuleInitializers) 1028 Value.second->~PerModuleInitializers(); 1029 ModuleInitializers.clear(); 1030 } 1031 1032 ASTContext::~ASTContext() { cleanup(); } 1033 1034 void ASTContext::setTraversalScope(const std::vector<Decl *> &TopLevelDecls) { 1035 TraversalScope = TopLevelDecls; 1036 getParentMapContext().clear(); 1037 } 1038 1039 void ASTContext::AddDeallocation(void (*Callback)(void *), void *Data) const { 1040 Deallocations.push_back({Callback, Data}); 1041 } 1042 1043 void 1044 ASTContext::setExternalSource(IntrusiveRefCntPtr<ExternalASTSource> Source) { 1045 ExternalSource = std::move(Source); 1046 } 1047 1048 void ASTContext::PrintStats() const { 1049 llvm::errs() << "\n*** AST Context Stats:\n"; 1050 llvm::errs() << " " << Types.size() << " types total.\n"; 1051 1052 unsigned counts[] = { 1053 #define TYPE(Name, Parent) 0, 1054 #define ABSTRACT_TYPE(Name, Parent) 1055 #include "clang/AST/TypeNodes.inc" 1056 0 // Extra 1057 }; 1058 1059 for (unsigned i = 0, e = Types.size(); i != e; ++i) { 1060 Type *T = Types[i]; 1061 counts[(unsigned)T->getTypeClass()]++; 1062 } 1063 1064 unsigned Idx = 0; 1065 unsigned TotalBytes = 0; 1066 #define TYPE(Name, Parent) \ 1067 if (counts[Idx]) \ 1068 llvm::errs() << " " << counts[Idx] << " " << #Name \ 1069 << " types, " << sizeof(Name##Type) << " each " \ 1070 << "(" << counts[Idx] * sizeof(Name##Type) \ 1071 << " bytes)\n"; \ 1072 TotalBytes += counts[Idx] * sizeof(Name##Type); \ 1073 ++Idx; 1074 #define ABSTRACT_TYPE(Name, Parent) 1075 #include "clang/AST/TypeNodes.inc" 1076 1077 llvm::errs() << "Total bytes = " << TotalBytes << "\n"; 1078 1079 // Implicit special member functions. 
1080 llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/" 1081 << NumImplicitDefaultConstructors 1082 << " implicit default constructors created\n"; 1083 llvm::errs() << NumImplicitCopyConstructorsDeclared << "/" 1084 << NumImplicitCopyConstructors 1085 << " implicit copy constructors created\n"; 1086 if (getLangOpts().CPlusPlus) 1087 llvm::errs() << NumImplicitMoveConstructorsDeclared << "/" 1088 << NumImplicitMoveConstructors 1089 << " implicit move constructors created\n"; 1090 llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/" 1091 << NumImplicitCopyAssignmentOperators 1092 << " implicit copy assignment operators created\n"; 1093 if (getLangOpts().CPlusPlus) 1094 llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/" 1095 << NumImplicitMoveAssignmentOperators 1096 << " implicit move assignment operators created\n"; 1097 llvm::errs() << NumImplicitDestructorsDeclared << "/" 1098 << NumImplicitDestructors 1099 << " implicit destructors created\n"; 1100 1101 if (ExternalSource) { 1102 llvm::errs() << "\n"; 1103 ExternalSource->PrintStats(); 1104 } 1105 1106 BumpAlloc.PrintStats(); 1107 } 1108 1109 void ASTContext::mergeDefinitionIntoModule(NamedDecl *ND, Module *M, 1110 bool NotifyListeners) { 1111 if (NotifyListeners) 1112 if (auto *Listener = getASTMutationListener()) 1113 Listener->RedefinedHiddenDefinition(ND, M); 1114 1115 MergedDefModules[cast<NamedDecl>(ND->getCanonicalDecl())].push_back(M); 1116 } 1117 1118 void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) { 1119 auto It = MergedDefModules.find(cast<NamedDecl>(ND->getCanonicalDecl())); 1120 if (It == MergedDefModules.end()) 1121 return; 1122 1123 auto &Merged = It->second; 1124 llvm::DenseSet<Module*> Found; 1125 for (Module *&M : Merged) 1126 if (!Found.insert(M).second) 1127 M = nullptr; 1128 llvm::erase_value(Merged, nullptr); 1129 } 1130 1131 ArrayRef<Module *> 1132 ASTContext::getModulesWithMergedDefinition(const NamedDecl *Def) { 1133 auto MergedIt = 1134 MergedDefModules.find(cast<NamedDecl>(Def->getCanonicalDecl())); 1135 if (MergedIt == MergedDefModules.end()) 1136 return None; 1137 return MergedIt->second; 1138 } 1139 1140 void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) { 1141 if (LazyInitializers.empty()) 1142 return; 1143 1144 auto *Source = Ctx.getExternalSource(); 1145 assert(Source && "lazy initializers but no external source"); 1146 1147 auto LazyInits = std::move(LazyInitializers); 1148 LazyInitializers.clear(); 1149 1150 for (auto ID : LazyInits) 1151 Initializers.push_back(Source->GetExternalDecl(ID)); 1152 1153 assert(LazyInitializers.empty() && 1154 "GetExternalDecl for lazy module initializer added more inits"); 1155 } 1156 1157 void ASTContext::addModuleInitializer(Module *M, Decl *D) { 1158 // One special case: if we add a module initializer that imports another 1159 // module, and that module's only initializer is an ImportDecl, simplify. 1160 if (const auto *ID = dyn_cast<ImportDecl>(D)) { 1161 auto It = ModuleInitializers.find(ID->getImportedModule()); 1162 1163 // Maybe the ImportDecl does nothing at all. (Common case.) 1164 if (It == ModuleInitializers.end()) 1165 return; 1166 1167 // Maybe the ImportDecl only imports another ImportDecl. 
1168 auto &Imported = *It->second; 1169 if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) { 1170 Imported.resolve(*this); 1171 auto *OnlyDecl = Imported.Initializers.front(); 1172 if (isa<ImportDecl>(OnlyDecl)) 1173 D = OnlyDecl; 1174 } 1175 } 1176 1177 auto *&Inits = ModuleInitializers[M]; 1178 if (!Inits) 1179 Inits = new (*this) PerModuleInitializers; 1180 Inits->Initializers.push_back(D); 1181 } 1182 1183 void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) { 1184 auto *&Inits = ModuleInitializers[M]; 1185 if (!Inits) 1186 Inits = new (*this) PerModuleInitializers; 1187 Inits->LazyInitializers.insert(Inits->LazyInitializers.end(), 1188 IDs.begin(), IDs.end()); 1189 } 1190 1191 ArrayRef<Decl *> ASTContext::getModuleInitializers(Module *M) { 1192 auto It = ModuleInitializers.find(M); 1193 if (It == ModuleInitializers.end()) 1194 return None; 1195 1196 auto *Inits = It->second; 1197 Inits->resolve(*this); 1198 return Inits->Initializers; 1199 } 1200 1201 ExternCContextDecl *ASTContext::getExternCContextDecl() const { 1202 if (!ExternCContext) 1203 ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl()); 1204 1205 return ExternCContext; 1206 } 1207 1208 BuiltinTemplateDecl * 1209 ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK, 1210 const IdentifierInfo *II) const { 1211 auto *BuiltinTemplate = 1212 BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK); 1213 BuiltinTemplate->setImplicit(); 1214 getTranslationUnitDecl()->addDecl(BuiltinTemplate); 1215 1216 return BuiltinTemplate; 1217 } 1218 1219 BuiltinTemplateDecl * 1220 ASTContext::getMakeIntegerSeqDecl() const { 1221 if (!MakeIntegerSeqDecl) 1222 MakeIntegerSeqDecl = buildBuiltinTemplateDecl(BTK__make_integer_seq, 1223 getMakeIntegerSeqName()); 1224 return MakeIntegerSeqDecl; 1225 } 1226 1227 BuiltinTemplateDecl * 1228 ASTContext::getTypePackElementDecl() const { 1229 if (!TypePackElementDecl) 1230 TypePackElementDecl = buildBuiltinTemplateDecl(BTK__type_pack_element, 1231 getTypePackElementName()); 1232 return TypePackElementDecl; 1233 } 1234 1235 RecordDecl *ASTContext::buildImplicitRecord(StringRef Name, 1236 RecordDecl::TagKind TK) const { 1237 SourceLocation Loc; 1238 RecordDecl *NewDecl; 1239 if (getLangOpts().CPlusPlus) 1240 NewDecl = CXXRecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, 1241 Loc, &Idents.get(Name)); 1242 else 1243 NewDecl = RecordDecl::Create(*this, TK, getTranslationUnitDecl(), Loc, Loc, 1244 &Idents.get(Name)); 1245 NewDecl->setImplicit(); 1246 NewDecl->addAttr(TypeVisibilityAttr::CreateImplicit( 1247 const_cast<ASTContext &>(*this), TypeVisibilityAttr::Default)); 1248 return NewDecl; 1249 } 1250 1251 TypedefDecl *ASTContext::buildImplicitTypedef(QualType T, 1252 StringRef Name) const { 1253 TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T); 1254 TypedefDecl *NewDecl = TypedefDecl::Create( 1255 const_cast<ASTContext &>(*this), getTranslationUnitDecl(), 1256 SourceLocation(), SourceLocation(), &Idents.get(Name), TInfo); 1257 NewDecl->setImplicit(); 1258 return NewDecl; 1259 } 1260 1261 TypedefDecl *ASTContext::getInt128Decl() const { 1262 if (!Int128Decl) 1263 Int128Decl = buildImplicitTypedef(Int128Ty, "__int128_t"); 1264 return Int128Decl; 1265 } 1266 1267 TypedefDecl *ASTContext::getUInt128Decl() const { 1268 if (!UInt128Decl) 1269 UInt128Decl = buildImplicitTypedef(UnsignedInt128Ty, "__uint128_t"); 1270 return UInt128Decl; 1271 } 1272 1273 void ASTContext::InitBuiltinType(CanQualType &R, 
BuiltinType::Kind K) { 1274 auto *Ty = new (*this, TypeAlignment) BuiltinType(K); 1275 R = CanQualType::CreateUnsafe(QualType(Ty, 0)); 1276 Types.push_back(Ty); 1277 } 1278 1279 void ASTContext::InitBuiltinTypes(const TargetInfo &Target, 1280 const TargetInfo *AuxTarget) { 1281 assert((!this->Target || this->Target == &Target) && 1282 "Incorrect target reinitialization"); 1283 assert(VoidTy.isNull() && "Context reinitialized?"); 1284 1285 this->Target = &Target; 1286 this->AuxTarget = AuxTarget; 1287 1288 ABI.reset(createCXXABI(Target)); 1289 AddrSpaceMap = getAddressSpaceMap(Target, LangOpts); 1290 AddrSpaceMapMangling = isAddrSpaceMapManglingEnabled(Target, LangOpts); 1291 1292 // C99 6.2.5p19. 1293 InitBuiltinType(VoidTy, BuiltinType::Void); 1294 1295 // C99 6.2.5p2. 1296 InitBuiltinType(BoolTy, BuiltinType::Bool); 1297 // C99 6.2.5p3. 1298 if (LangOpts.CharIsSigned) 1299 InitBuiltinType(CharTy, BuiltinType::Char_S); 1300 else 1301 InitBuiltinType(CharTy, BuiltinType::Char_U); 1302 // C99 6.2.5p4. 1303 InitBuiltinType(SignedCharTy, BuiltinType::SChar); 1304 InitBuiltinType(ShortTy, BuiltinType::Short); 1305 InitBuiltinType(IntTy, BuiltinType::Int); 1306 InitBuiltinType(LongTy, BuiltinType::Long); 1307 InitBuiltinType(LongLongTy, BuiltinType::LongLong); 1308 1309 // C99 6.2.5p6. 1310 InitBuiltinType(UnsignedCharTy, BuiltinType::UChar); 1311 InitBuiltinType(UnsignedShortTy, BuiltinType::UShort); 1312 InitBuiltinType(UnsignedIntTy, BuiltinType::UInt); 1313 InitBuiltinType(UnsignedLongTy, BuiltinType::ULong); 1314 InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong); 1315 1316 // C99 6.2.5p10. 1317 InitBuiltinType(FloatTy, BuiltinType::Float); 1318 InitBuiltinType(DoubleTy, BuiltinType::Double); 1319 InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble); 1320 1321 // GNU extension, __float128 for IEEE quadruple precision 1322 InitBuiltinType(Float128Ty, BuiltinType::Float128); 1323 1324 // __ibm128 for IBM extended precision 1325 InitBuiltinType(Ibm128Ty, BuiltinType::Ibm128); 1326 1327 // C11 extension ISO/IEC TS 18661-3 1328 InitBuiltinType(Float16Ty, BuiltinType::Float16); 1329 1330 // ISO/IEC JTC1 SC22 WG14 N1169 Extension 1331 InitBuiltinType(ShortAccumTy, BuiltinType::ShortAccum); 1332 InitBuiltinType(AccumTy, BuiltinType::Accum); 1333 InitBuiltinType(LongAccumTy, BuiltinType::LongAccum); 1334 InitBuiltinType(UnsignedShortAccumTy, BuiltinType::UShortAccum); 1335 InitBuiltinType(UnsignedAccumTy, BuiltinType::UAccum); 1336 InitBuiltinType(UnsignedLongAccumTy, BuiltinType::ULongAccum); 1337 InitBuiltinType(ShortFractTy, BuiltinType::ShortFract); 1338 InitBuiltinType(FractTy, BuiltinType::Fract); 1339 InitBuiltinType(LongFractTy, BuiltinType::LongFract); 1340 InitBuiltinType(UnsignedShortFractTy, BuiltinType::UShortFract); 1341 InitBuiltinType(UnsignedFractTy, BuiltinType::UFract); 1342 InitBuiltinType(UnsignedLongFractTy, BuiltinType::ULongFract); 1343 InitBuiltinType(SatShortAccumTy, BuiltinType::SatShortAccum); 1344 InitBuiltinType(SatAccumTy, BuiltinType::SatAccum); 1345 InitBuiltinType(SatLongAccumTy, BuiltinType::SatLongAccum); 1346 InitBuiltinType(SatUnsignedShortAccumTy, BuiltinType::SatUShortAccum); 1347 InitBuiltinType(SatUnsignedAccumTy, BuiltinType::SatUAccum); 1348 InitBuiltinType(SatUnsignedLongAccumTy, BuiltinType::SatULongAccum); 1349 InitBuiltinType(SatShortFractTy, BuiltinType::SatShortFract); 1350 InitBuiltinType(SatFractTy, BuiltinType::SatFract); 1351 InitBuiltinType(SatLongFractTy, BuiltinType::SatLongFract); 1352 InitBuiltinType(SatUnsignedShortFractTy, 
BuiltinType::SatUShortFract); 1353 InitBuiltinType(SatUnsignedFractTy, BuiltinType::SatUFract); 1354 InitBuiltinType(SatUnsignedLongFractTy, BuiltinType::SatULongFract); 1355 1356 // GNU extension, 128-bit integers. 1357 InitBuiltinType(Int128Ty, BuiltinType::Int128); 1358 InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128); 1359 1360 // C++ 3.9.1p5 1361 if (TargetInfo::isTypeSigned(Target.getWCharType())) 1362 InitBuiltinType(WCharTy, BuiltinType::WChar_S); 1363 else // -fshort-wchar makes wchar_t be unsigned. 1364 InitBuiltinType(WCharTy, BuiltinType::WChar_U); 1365 if (LangOpts.CPlusPlus && LangOpts.WChar) 1366 WideCharTy = WCharTy; 1367 else { 1368 // C99 (or C++ using -fno-wchar). 1369 WideCharTy = getFromTargetType(Target.getWCharType()); 1370 } 1371 1372 WIntTy = getFromTargetType(Target.getWIntType()); 1373 1374 // C++20 (proposed) 1375 InitBuiltinType(Char8Ty, BuiltinType::Char8); 1376 1377 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1378 InitBuiltinType(Char16Ty, BuiltinType::Char16); 1379 else // C99 1380 Char16Ty = getFromTargetType(Target.getChar16Type()); 1381 1382 if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++ 1383 InitBuiltinType(Char32Ty, BuiltinType::Char32); 1384 else // C99 1385 Char32Ty = getFromTargetType(Target.getChar32Type()); 1386 1387 // Placeholder type for type-dependent expressions whose type is 1388 // completely unknown. No code should ever check a type against 1389 // DependentTy and users should never see it; however, it is here to 1390 // help diagnose failures to properly check for type-dependent 1391 // expressions. 1392 InitBuiltinType(DependentTy, BuiltinType::Dependent); 1393 1394 // Placeholder type for functions. 1395 InitBuiltinType(OverloadTy, BuiltinType::Overload); 1396 1397 // Placeholder type for bound members. 1398 InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember); 1399 1400 // Placeholder type for pseudo-objects. 1401 InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject); 1402 1403 // "any" type; useful for debugger-like clients. 1404 InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny); 1405 1406 // Placeholder type for unbridged ARC casts. 1407 InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast); 1408 1409 // Placeholder type for builtin functions. 1410 InitBuiltinType(BuiltinFnTy, BuiltinType::BuiltinFn); 1411 1412 // Placeholder type for OMP array sections. 1413 if (LangOpts.OpenMP) { 1414 InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection); 1415 InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping); 1416 InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator); 1417 } 1418 if (LangOpts.MatrixTypes) 1419 InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx); 1420 1421 // Builtin types for 'id', 'Class', and 'SEL'. 
1422 InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId); 1423 InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass); 1424 InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel); 1425 1426 if (LangOpts.OpenCL) { 1427 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 1428 InitBuiltinType(SingletonId, BuiltinType::Id); 1429 #include "clang/Basic/OpenCLImageTypes.def" 1430 1431 InitBuiltinType(OCLSamplerTy, BuiltinType::OCLSampler); 1432 InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent); 1433 InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent); 1434 InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue); 1435 InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID); 1436 1437 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 1438 InitBuiltinType(Id##Ty, BuiltinType::Id); 1439 #include "clang/Basic/OpenCLExtensionTypes.def" 1440 } 1441 1442 if (Target.hasAArch64SVETypes()) { 1443 #define SVE_TYPE(Name, Id, SingletonId) \ 1444 InitBuiltinType(SingletonId, BuiltinType::Id); 1445 #include "clang/Basic/AArch64SVEACLETypes.def" 1446 } 1447 1448 if (Target.getTriple().isPPC64()) { 1449 #define PPC_VECTOR_MMA_TYPE(Name, Id, Size) \ 1450 InitBuiltinType(Id##Ty, BuiltinType::Id); 1451 #include "clang/Basic/PPCTypes.def" 1452 #define PPC_VECTOR_VSX_TYPE(Name, Id, Size) \ 1453 InitBuiltinType(Id##Ty, BuiltinType::Id); 1454 #include "clang/Basic/PPCTypes.def" 1455 } 1456 1457 if (Target.hasRISCVVTypes()) { 1458 #define RVV_TYPE(Name, Id, SingletonId) \ 1459 InitBuiltinType(SingletonId, BuiltinType::Id); 1460 #include "clang/Basic/RISCVVTypes.def" 1461 } 1462 1463 // Builtin type for __objc_yes and __objc_no 1464 ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ? 1465 SignedCharTy : BoolTy); 1466 1467 ObjCConstantStringType = QualType(); 1468 1469 ObjCSuperType = QualType(); 1470 1471 // void * type 1472 if (LangOpts.OpenCLGenericAddressSpace) { 1473 auto Q = VoidTy.getQualifiers(); 1474 Q.setAddressSpace(LangAS::opencl_generic); 1475 VoidPtrTy = getPointerType(getCanonicalType( 1476 getQualifiedType(VoidTy.getUnqualifiedType(), Q))); 1477 } else { 1478 VoidPtrTy = getPointerType(VoidTy); 1479 } 1480 1481 // nullptr type (C++0x 2.14.7) 1482 InitBuiltinType(NullPtrTy, BuiltinType::NullPtr); 1483 1484 // half type (OpenCL 6.1.1.1) / ARM NEON __fp16 1485 InitBuiltinType(HalfTy, BuiltinType::Half); 1486 1487 InitBuiltinType(BFloat16Ty, BuiltinType::BFloat16); 1488 1489 // Builtin type used to help define __builtin_va_list. 1490 VaListTagDecl = nullptr; 1491 1492 // MSVC predeclares struct _GUID, and we need it to create MSGuidDecls. 1493 if (LangOpts.MicrosoftExt || LangOpts.Borland) { 1494 MSGuidTagDecl = buildImplicitRecord("_GUID"); 1495 getTranslationUnitDecl()->addDecl(MSGuidTagDecl); 1496 } 1497 } 1498 1499 DiagnosticsEngine &ASTContext::getDiagnostics() const { 1500 return SourceMgr.getDiagnostics(); 1501 } 1502 1503 AttrVec& ASTContext::getDeclAttrs(const Decl *D) { 1504 AttrVec *&Result = DeclAttrs[D]; 1505 if (!Result) { 1506 void *Mem = Allocate(sizeof(AttrVec)); 1507 Result = new (Mem) AttrVec; 1508 } 1509 1510 return *Result; 1511 } 1512 1513 /// Erase the attributes corresponding to the given declaration. 1514 void ASTContext::eraseDeclAttrs(const Decl *D) { 1515 llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D); 1516 if (Pos != DeclAttrs.end()) { 1517 Pos->second->~AttrVec(); 1518 DeclAttrs.erase(Pos); 1519 } 1520 } 1521 1522 // FIXME: Remove ? 
1523 MemberSpecializationInfo * 1524 ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) { 1525 assert(Var->isStaticDataMember() && "Not a static data member"); 1526 return getTemplateOrSpecializationInfo(Var) 1527 .dyn_cast<MemberSpecializationInfo *>(); 1528 } 1529 1530 ASTContext::TemplateOrSpecializationInfo 1531 ASTContext::getTemplateOrSpecializationInfo(const VarDecl *Var) { 1532 llvm::DenseMap<const VarDecl *, TemplateOrSpecializationInfo>::iterator Pos = 1533 TemplateOrInstantiation.find(Var); 1534 if (Pos == TemplateOrInstantiation.end()) 1535 return {}; 1536 1537 return Pos->second; 1538 } 1539 1540 void 1541 ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl, 1542 TemplateSpecializationKind TSK, 1543 SourceLocation PointOfInstantiation) { 1544 assert(Inst->isStaticDataMember() && "Not a static data member"); 1545 assert(Tmpl->isStaticDataMember() && "Not a static data member"); 1546 setTemplateOrSpecializationInfo(Inst, new (*this) MemberSpecializationInfo( 1547 Tmpl, TSK, PointOfInstantiation)); 1548 } 1549 1550 void 1551 ASTContext::setTemplateOrSpecializationInfo(VarDecl *Inst, 1552 TemplateOrSpecializationInfo TSI) { 1553 assert(!TemplateOrInstantiation[Inst] && 1554 "Already noted what the variable was instantiated from"); 1555 TemplateOrInstantiation[Inst] = TSI; 1556 } 1557 1558 NamedDecl * 1559 ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) { 1560 auto Pos = InstantiatedFromUsingDecl.find(UUD); 1561 if (Pos == InstantiatedFromUsingDecl.end()) 1562 return nullptr; 1563 1564 return Pos->second; 1565 } 1566 1567 void 1568 ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) { 1569 assert((isa<UsingDecl>(Pattern) || 1570 isa<UnresolvedUsingValueDecl>(Pattern) || 1571 isa<UnresolvedUsingTypenameDecl>(Pattern)) && 1572 "pattern decl is not a using decl"); 1573 assert((isa<UsingDecl>(Inst) || 1574 isa<UnresolvedUsingValueDecl>(Inst) || 1575 isa<UnresolvedUsingTypenameDecl>(Inst)) && 1576 "instantiation did not produce a using decl"); 1577 assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists"); 1578 InstantiatedFromUsingDecl[Inst] = Pattern; 1579 } 1580 1581 UsingEnumDecl * 1582 ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) { 1583 auto Pos = InstantiatedFromUsingEnumDecl.find(UUD); 1584 if (Pos == InstantiatedFromUsingEnumDecl.end()) 1585 return nullptr; 1586 1587 return Pos->second; 1588 } 1589 1590 void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst, 1591 UsingEnumDecl *Pattern) { 1592 assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists"); 1593 InstantiatedFromUsingEnumDecl[Inst] = Pattern; 1594 } 1595 1596 UsingShadowDecl * 1597 ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) { 1598 llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos 1599 = InstantiatedFromUsingShadowDecl.find(Inst); 1600 if (Pos == InstantiatedFromUsingShadowDecl.end()) 1601 return nullptr; 1602 1603 return Pos->second; 1604 } 1605 1606 void 1607 ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst, 1608 UsingShadowDecl *Pattern) { 1609 assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists"); 1610 InstantiatedFromUsingShadowDecl[Inst] = Pattern; 1611 } 1612 1613 FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) { 1614 llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos 1615 = InstantiatedFromUnnamedFieldDecl.find(Field); 1616 if (Pos == 
InstantiatedFromUnnamedFieldDecl.end()) 1617 return nullptr; 1618 1619 return Pos->second; 1620 } 1621 1622 void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, 1623 FieldDecl *Tmpl) { 1624 assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed"); 1625 assert(!Tmpl->getDeclName() && "Template field decl is not unnamed"); 1626 assert(!InstantiatedFromUnnamedFieldDecl[Inst] && 1627 "Already noted what unnamed field was instantiated from"); 1628 1629 InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl; 1630 } 1631 1632 ASTContext::overridden_cxx_method_iterator 1633 ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const { 1634 return overridden_methods(Method).begin(); 1635 } 1636 1637 ASTContext::overridden_cxx_method_iterator 1638 ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const { 1639 return overridden_methods(Method).end(); 1640 } 1641 1642 unsigned 1643 ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const { 1644 auto Range = overridden_methods(Method); 1645 return Range.end() - Range.begin(); 1646 } 1647 1648 ASTContext::overridden_method_range 1649 ASTContext::overridden_methods(const CXXMethodDecl *Method) const { 1650 llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos = 1651 OverriddenMethods.find(Method->getCanonicalDecl()); 1652 if (Pos == OverriddenMethods.end()) 1653 return overridden_method_range(nullptr, nullptr); 1654 return overridden_method_range(Pos->second.begin(), Pos->second.end()); 1655 } 1656 1657 void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method, 1658 const CXXMethodDecl *Overridden) { 1659 assert(Method->isCanonicalDecl() && Overridden->isCanonicalDecl()); 1660 OverriddenMethods[Method].push_back(Overridden); 1661 } 1662 1663 void ASTContext::getOverriddenMethods( 1664 const NamedDecl *D, 1665 SmallVectorImpl<const NamedDecl *> &Overridden) const { 1666 assert(D); 1667 1668 if (const auto *CXXMethod = dyn_cast<CXXMethodDecl>(D)) { 1669 Overridden.append(overridden_methods_begin(CXXMethod), 1670 overridden_methods_end(CXXMethod)); 1671 return; 1672 } 1673 1674 const auto *Method = dyn_cast<ObjCMethodDecl>(D); 1675 if (!Method) 1676 return; 1677 1678 SmallVector<const ObjCMethodDecl *, 8> OverDecls; 1679 Method->getOverriddenMethods(OverDecls); 1680 Overridden.append(OverDecls.begin(), OverDecls.end()); 1681 } 1682 1683 void ASTContext::addedLocalImportDecl(ImportDecl *Import) { 1684 assert(!Import->getNextLocalImport() && 1685 "Import declaration already in the chain"); 1686 assert(!Import->isFromASTFile() && "Non-local import declaration"); 1687 if (!FirstLocalImport) { 1688 FirstLocalImport = Import; 1689 LastLocalImport = Import; 1690 return; 1691 } 1692 1693 LastLocalImport->setNextLocalImport(Import); 1694 LastLocalImport = Import; 1695 } 1696 1697 //===----------------------------------------------------------------------===// 1698 // Type Sizing and Analysis 1699 //===----------------------------------------------------------------------===// 1700 1701 /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified 1702 /// scalar floating point type. 
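// getFloatTypeSemantics (defined just below) maps a floating-point QualType to
// the llvm::fltSemantics object describing the target's format. A standalone
// sketch of what such a semantics object is good for -- re-rounding a value to
// a narrower format -- using the stock IEEE formats; the helper name is
// hypothetical.
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
#include "llvm/ADT/APFloat.h"

// Round V through the given format and back to double, e.g. to see what a
// literal would look like once stored in the target's 'half' type.
static double roundTripThroughFormat(const llvm::fltSemantics &Sem, double V) {
  llvm::APFloat F(V);
  bool LosesInfo = false;
  F.convert(Sem, llvm::APFloat::rmNearestTiesToEven, &LosesInfo);
  F.convert(llvm::APFloat::IEEEdouble(), llvm::APFloat::rmNearestTiesToEven,
            &LosesInfo);
  return F.convertToDouble();
}

// roundTripThroughFormat(llvm::APFloat::IEEEhalf(), 0.1) != 0.1, because the
// value is re-rounded to half precision first.
#endif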
1703 const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const { 1704 switch (T->castAs<BuiltinType>()->getKind()) { 1705 default: 1706 llvm_unreachable("Not a floating point type!"); 1707 case BuiltinType::BFloat16: 1708 return Target->getBFloat16Format(); 1709 case BuiltinType::Float16: 1710 case BuiltinType::Half: 1711 return Target->getHalfFormat(); 1712 case BuiltinType::Float: return Target->getFloatFormat(); 1713 case BuiltinType::Double: return Target->getDoubleFormat(); 1714 case BuiltinType::Ibm128: 1715 return Target->getIbm128Format(); 1716 case BuiltinType::LongDouble: 1717 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1718 return AuxTarget->getLongDoubleFormat(); 1719 return Target->getLongDoubleFormat(); 1720 case BuiltinType::Float128: 1721 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice) 1722 return AuxTarget->getFloat128Format(); 1723 return Target->getFloat128Format(); 1724 } 1725 } 1726 1727 CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const { 1728 unsigned Align = Target->getCharWidth(); 1729 1730 bool UseAlignAttrOnly = false; 1731 if (unsigned AlignFromAttr = D->getMaxAlignment()) { 1732 Align = AlignFromAttr; 1733 1734 // __attribute__((aligned)) can increase or decrease alignment 1735 // *except* on a struct or struct member, where it only increases 1736 // alignment unless 'packed' is also specified. 1737 // 1738 // It is an error for alignas to decrease alignment, so we can 1739 // ignore that possibility; Sema should diagnose it. 1740 if (isa<FieldDecl>(D)) { 1741 UseAlignAttrOnly = D->hasAttr<PackedAttr>() || 1742 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1743 } else { 1744 UseAlignAttrOnly = true; 1745 } 1746 } 1747 else if (isa<FieldDecl>(D)) 1748 UseAlignAttrOnly = 1749 D->hasAttr<PackedAttr>() || 1750 cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>(); 1751 1752 // If we're using the align attribute only, just ignore everything 1753 // else about the declaration and its type. 1754 if (UseAlignAttrOnly) { 1755 // do nothing 1756 } else if (const auto *VD = dyn_cast<ValueDecl>(D)) { 1757 QualType T = VD->getType(); 1758 if (const auto *RT = T->getAs<ReferenceType>()) { 1759 if (ForAlignof) 1760 T = RT->getPointeeType(); 1761 else 1762 T = getPointerType(RT->getPointeeType()); 1763 } 1764 QualType BaseT = getBaseElementType(T); 1765 if (T->isFunctionType()) 1766 Align = getTypeInfoImpl(T.getTypePtr()).Align; 1767 else if (!BaseT->isIncompleteType()) { 1768 // Adjust alignments of declarations with array type by the 1769 // large-array alignment on the target. 
1770 if (const ArrayType *arrayType = getAsArrayType(T)) { 1771 unsigned MinWidth = Target->getLargeArrayMinWidth(); 1772 if (!ForAlignof && MinWidth) { 1773 if (isa<VariableArrayType>(arrayType)) 1774 Align = std::max(Align, Target->getLargeArrayAlign()); 1775 else if (isa<ConstantArrayType>(arrayType) && 1776 MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType))) 1777 Align = std::max(Align, Target->getLargeArrayAlign()); 1778 } 1779 } 1780 Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr())); 1781 if (BaseT.getQualifiers().hasUnaligned()) 1782 Align = Target->getCharWidth(); 1783 if (const auto *VD = dyn_cast<VarDecl>(D)) { 1784 if (VD->hasGlobalStorage() && !ForAlignof) { 1785 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 1786 Align = std::max(Align, getTargetInfo().getMinGlobalAlign(TypeSize)); 1787 } 1788 } 1789 } 1790 1791 // Fields can be subject to extra alignment constraints, like if 1792 // the field is packed, the struct is packed, or the struct has a 1793 // a max-field-alignment constraint (#pragma pack). So calculate 1794 // the actual alignment of the field within the struct, and then 1795 // (as we're expected to) constrain that by the alignment of the type. 1796 if (const auto *Field = dyn_cast<FieldDecl>(VD)) { 1797 const RecordDecl *Parent = Field->getParent(); 1798 // We can only produce a sensible answer if the record is valid. 1799 if (!Parent->isInvalidDecl()) { 1800 const ASTRecordLayout &Layout = getASTRecordLayout(Parent); 1801 1802 // Start with the record's overall alignment. 1803 unsigned FieldAlign = toBits(Layout.getAlignment()); 1804 1805 // Use the GCD of that and the offset within the record. 1806 uint64_t Offset = Layout.getFieldOffset(Field->getFieldIndex()); 1807 if (Offset > 0) { 1808 // Alignment is always a power of 2, so the GCD will be a power of 2, 1809 // which means we get to do this crazy thing instead of Euclid's. 1810 uint64_t LowBitOfOffset = Offset & (~Offset + 1); 1811 if (LowBitOfOffset < FieldAlign) 1812 FieldAlign = static_cast<unsigned>(LowBitOfOffset); 1813 } 1814 1815 Align = std::min(Align, FieldAlign); 1816 } 1817 } 1818 } 1819 1820 // Some targets have hard limitation on the maximum requestable alignment in 1821 // aligned attribute for static variables. 1822 const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute(); 1823 const auto *VD = dyn_cast<VarDecl>(D); 1824 if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static) 1825 Align = std::min(Align, MaxAlignedAttr); 1826 1827 return toCharUnitsFromBits(Align); 1828 } 1829 1830 CharUnits ASTContext::getExnObjectAlignment() const { 1831 return toCharUnitsFromBits(Target->getExnObjectAlignment()); 1832 } 1833 1834 // getTypeInfoDataSizeInChars - Return the size of a type, in 1835 // chars. If the type is a record, its data size is returned. This is 1836 // the size of the memcpy that's performed when assigning this type 1837 // using a trivial copy/move assignment operator. 1838 TypeInfoChars ASTContext::getTypeInfoDataSizeInChars(QualType T) const { 1839 TypeInfoChars Info = getTypeInfoInChars(T); 1840 1841 // In C++, objects can sometimes be allocated into the tail padding 1842 // of a base-class subobject. We decide whether that's possible 1843 // during class layout, so here we can just trust the layout results. 
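// The field-alignment clamp in getDeclAlign above relies on the identity that,
// for a nonzero offset, Offset & (~Offset + 1) (equivalently Offset & -Offset)
// isolates the lowest set bit, i.e. the largest power of two dividing the
// offset; clamping a power-of-two alignment to that value is exactly
// gcd(Offset, Align). A standalone sketch:
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
#include <algorithm>
#include <cassert>
#include <cstdint>

// Largest power of two that divides X (X != 0): the lowest set bit.
static uint64_t lowestSetBit(uint64_t X) { return X & (~X + 1); }

int main() {
  assert(lowestSetBit(24) == 8);  // 24 = 2^3 * 3
  assert(lowestSetBit(96) == 32); // 96 = 2^5 * 3
  // A field at bit offset 24 in a record aligned to 16 bits can only be
  // guaranteed gcd(24, 16) == 8 bits of alignment.
  uint64_t Offset = 24, RecordAlign = 16;
  uint64_t FieldAlign = std::min(lowestSetBit(Offset), RecordAlign);
  assert(FieldAlign == 8);
}
#endif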
1844 if (getLangOpts().CPlusPlus) { 1845 if (const auto *RT = T->getAs<RecordType>()) { 1846 const ASTRecordLayout &layout = getASTRecordLayout(RT->getDecl()); 1847 Info.Width = layout.getDataSize(); 1848 } 1849 } 1850 1851 return Info; 1852 } 1853 1854 /// getConstantArrayInfoInChars - Performing the computation in CharUnits 1855 /// instead of in bits prevents overflowing the uint64_t for some large arrays. 1856 TypeInfoChars 1857 static getConstantArrayInfoInChars(const ASTContext &Context, 1858 const ConstantArrayType *CAT) { 1859 TypeInfoChars EltInfo = Context.getTypeInfoInChars(CAT->getElementType()); 1860 uint64_t Size = CAT->getSize().getZExtValue(); 1861 assert((Size == 0 || static_cast<uint64_t>(EltInfo.Width.getQuantity()) <= 1862 (uint64_t)(-1)/Size) && 1863 "Overflow in array type char size evaluation"); 1864 uint64_t Width = EltInfo.Width.getQuantity() * Size; 1865 unsigned Align = EltInfo.Align.getQuantity(); 1866 if (!Context.getTargetInfo().getCXXABI().isMicrosoft() || 1867 Context.getTargetInfo().getPointerWidth(0) == 64) 1868 Width = llvm::alignTo(Width, Align); 1869 return TypeInfoChars(CharUnits::fromQuantity(Width), 1870 CharUnits::fromQuantity(Align), 1871 EltInfo.AlignRequirement); 1872 } 1873 1874 TypeInfoChars ASTContext::getTypeInfoInChars(const Type *T) const { 1875 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1876 return getConstantArrayInfoInChars(*this, CAT); 1877 TypeInfo Info = getTypeInfo(T); 1878 return TypeInfoChars(toCharUnitsFromBits(Info.Width), 1879 toCharUnitsFromBits(Info.Align), Info.AlignRequirement); 1880 } 1881 1882 TypeInfoChars ASTContext::getTypeInfoInChars(QualType T) const { 1883 return getTypeInfoInChars(T.getTypePtr()); 1884 } 1885 1886 bool ASTContext::isAlignmentRequired(const Type *T) const { 1887 return getTypeInfo(T).AlignRequirement != AlignRequirementKind::None; 1888 } 1889 1890 bool ASTContext::isAlignmentRequired(QualType T) const { 1891 return isAlignmentRequired(T.getTypePtr()); 1892 } 1893 1894 unsigned ASTContext::getTypeAlignIfKnown(QualType T, 1895 bool NeedsPreferredAlignment) const { 1896 // An alignment on a typedef overrides anything else. 1897 if (const auto *TT = T->getAs<TypedefType>()) 1898 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1899 return Align; 1900 1901 // If we have an (array of) complete type, we're done. 1902 T = getBaseElementType(T); 1903 if (!T->isIncompleteType()) 1904 return NeedsPreferredAlignment ? getPreferredTypeAlign(T) : getTypeAlign(T); 1905 1906 // If we had an array type, its element type might be a typedef 1907 // type with an alignment attribute. 1908 if (const auto *TT = T->getAs<TypedefType>()) 1909 if (unsigned Align = TT->getDecl()->getMaxAlignment()) 1910 return Align; 1911 1912 // Otherwise, see if the declaration of the type had an attribute. 1913 if (const auto *TT = T->getAs<TagType>()) 1914 return TT->getDecl()->getMaxAlignment(); 1915 1916 return 0; 1917 } 1918 1919 TypeInfo ASTContext::getTypeInfo(const Type *T) const { 1920 TypeInfoMap::iterator I = MemoizedTypeInfo.find(T); 1921 if (I != MemoizedTypeInfo.end()) 1922 return I->second; 1923 1924 // This call can invalidate MemoizedTypeInfo[T], so we need a second lookup. 1925 TypeInfo TI = getTypeInfoImpl(T); 1926 MemoizedTypeInfo[T] = TI; 1927 return TI; 1928 } 1929 1930 /// getTypeInfoImpl - Return the size of the specified type, in bits. This 1931 /// method does not work on incomplete types. 
1932 /// 1933 /// FIXME: Pointers into different addr spaces could have different sizes and 1934 /// alignment requirements: getPointerInfo should take an AddrSpace, this 1935 /// should take a QualType, &c. 1936 TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { 1937 uint64_t Width = 0; 1938 unsigned Align = 8; 1939 AlignRequirementKind AlignRequirement = AlignRequirementKind::None; 1940 unsigned AS = 0; 1941 switch (T->getTypeClass()) { 1942 #define TYPE(Class, Base) 1943 #define ABSTRACT_TYPE(Class, Base) 1944 #define NON_CANONICAL_TYPE(Class, Base) 1945 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 1946 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) \ 1947 case Type::Class: \ 1948 assert(!T->isDependentType() && "should not see dependent types here"); \ 1949 return getTypeInfo(cast<Class##Type>(T)->desugar().getTypePtr()); 1950 #include "clang/AST/TypeNodes.inc" 1951 llvm_unreachable("Should not see dependent types"); 1952 1953 case Type::FunctionNoProto: 1954 case Type::FunctionProto: 1955 // GCC extension: alignof(function) = 32 bits 1956 Width = 0; 1957 Align = 32; 1958 break; 1959 1960 case Type::IncompleteArray: 1961 case Type::VariableArray: 1962 case Type::ConstantArray: { 1963 // Model non-constant sized arrays as size zero, but track the alignment. 1964 uint64_t Size = 0; 1965 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) 1966 Size = CAT->getSize().getZExtValue(); 1967 1968 TypeInfo EltInfo = getTypeInfo(cast<ArrayType>(T)->getElementType()); 1969 assert((Size == 0 || EltInfo.Width <= (uint64_t)(-1) / Size) && 1970 "Overflow in array type bit size evaluation"); 1971 Width = EltInfo.Width * Size; 1972 Align = EltInfo.Align; 1973 AlignRequirement = EltInfo.AlignRequirement; 1974 if (!getTargetInfo().getCXXABI().isMicrosoft() || 1975 getTargetInfo().getPointerWidth(0) == 64) 1976 Width = llvm::alignTo(Width, Align); 1977 break; 1978 } 1979 1980 case Type::ExtVector: 1981 case Type::Vector: { 1982 const auto *VT = cast<VectorType>(T); 1983 TypeInfo EltInfo = getTypeInfo(VT->getElementType()); 1984 Width = EltInfo.Width * VT->getNumElements(); 1985 Align = Width; 1986 // If the alignment is not a power of 2, round up to the next power of 2. 1987 // This happens for non-power-of-2 length vectors. 1988 if (Align & (Align-1)) { 1989 Align = llvm::NextPowerOf2(Align); 1990 Width = llvm::alignTo(Width, Align); 1991 } 1992 // Adjust the alignment based on the target max. 1993 uint64_t TargetVectorAlign = Target->getMaxVectorAlign(); 1994 if (TargetVectorAlign && TargetVectorAlign < Align) 1995 Align = TargetVectorAlign; 1996 if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 1997 // Adjust the alignment for fixed-length SVE vectors. This is important 1998 // for non-power-of-2 vector lengths. 1999 Align = 128; 2000 else if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 2001 // Adjust the alignment for fixed-length SVE predicates. 2002 Align = 16; 2003 break; 2004 } 2005 2006 case Type::ConstantMatrix: { 2007 const auto *MT = cast<ConstantMatrixType>(T); 2008 TypeInfo ElementInfo = getTypeInfo(MT->getElementType()); 2009 // The internal layout of a matrix value is implementation defined. 2010 // Initially be ABI compatible with arrays with respect to alignment and 2011 // size. 
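// Worked example of the ExtVector/Vector sizing above: a 3-element vector of
// 32-bit elements starts out 96 bits wide; since 96 is not a power of two,
// both its alignment and its storage size are rounded up to 128 bits (assuming
// the target does not impose a smaller maximum vector alignment).
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

int main() {
  // e.g. a hypothetical 'int __attribute__((ext_vector_type(3)))'
  uint64_t Width = 32 * 3;                 // 96 bits of payload
  unsigned Align = Width;                  // vectors start out with Align == Width
  if (Align & (Align - 1)) {               // 96 is not a power of two
    Align = llvm::NextPowerOf2(Align);     // -> 128
    Width = llvm::alignTo(Width, Align);   // -> 128, i.e. sizeof == 16 bytes
  }
  assert(Align == 128 && Width == 128);
}
#endif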
2012 Width = ElementInfo.Width * MT->getNumRows() * MT->getNumColumns(); 2013 Align = ElementInfo.Align; 2014 break; 2015 } 2016 2017 case Type::Builtin: 2018 switch (cast<BuiltinType>(T)->getKind()) { 2019 default: llvm_unreachable("Unknown builtin type!"); 2020 case BuiltinType::Void: 2021 // GCC extension: alignof(void) = 8 bits. 2022 Width = 0; 2023 Align = 8; 2024 break; 2025 case BuiltinType::Bool: 2026 Width = Target->getBoolWidth(); 2027 Align = Target->getBoolAlign(); 2028 break; 2029 case BuiltinType::Char_S: 2030 case BuiltinType::Char_U: 2031 case BuiltinType::UChar: 2032 case BuiltinType::SChar: 2033 case BuiltinType::Char8: 2034 Width = Target->getCharWidth(); 2035 Align = Target->getCharAlign(); 2036 break; 2037 case BuiltinType::WChar_S: 2038 case BuiltinType::WChar_U: 2039 Width = Target->getWCharWidth(); 2040 Align = Target->getWCharAlign(); 2041 break; 2042 case BuiltinType::Char16: 2043 Width = Target->getChar16Width(); 2044 Align = Target->getChar16Align(); 2045 break; 2046 case BuiltinType::Char32: 2047 Width = Target->getChar32Width(); 2048 Align = Target->getChar32Align(); 2049 break; 2050 case BuiltinType::UShort: 2051 case BuiltinType::Short: 2052 Width = Target->getShortWidth(); 2053 Align = Target->getShortAlign(); 2054 break; 2055 case BuiltinType::UInt: 2056 case BuiltinType::Int: 2057 Width = Target->getIntWidth(); 2058 Align = Target->getIntAlign(); 2059 break; 2060 case BuiltinType::ULong: 2061 case BuiltinType::Long: 2062 Width = Target->getLongWidth(); 2063 Align = Target->getLongAlign(); 2064 break; 2065 case BuiltinType::ULongLong: 2066 case BuiltinType::LongLong: 2067 Width = Target->getLongLongWidth(); 2068 Align = Target->getLongLongAlign(); 2069 break; 2070 case BuiltinType::Int128: 2071 case BuiltinType::UInt128: 2072 Width = 128; 2073 Align = 128; // int128_t is 128-bit aligned on all targets. 
2074 break; 2075 case BuiltinType::ShortAccum: 2076 case BuiltinType::UShortAccum: 2077 case BuiltinType::SatShortAccum: 2078 case BuiltinType::SatUShortAccum: 2079 Width = Target->getShortAccumWidth(); 2080 Align = Target->getShortAccumAlign(); 2081 break; 2082 case BuiltinType::Accum: 2083 case BuiltinType::UAccum: 2084 case BuiltinType::SatAccum: 2085 case BuiltinType::SatUAccum: 2086 Width = Target->getAccumWidth(); 2087 Align = Target->getAccumAlign(); 2088 break; 2089 case BuiltinType::LongAccum: 2090 case BuiltinType::ULongAccum: 2091 case BuiltinType::SatLongAccum: 2092 case BuiltinType::SatULongAccum: 2093 Width = Target->getLongAccumWidth(); 2094 Align = Target->getLongAccumAlign(); 2095 break; 2096 case BuiltinType::ShortFract: 2097 case BuiltinType::UShortFract: 2098 case BuiltinType::SatShortFract: 2099 case BuiltinType::SatUShortFract: 2100 Width = Target->getShortFractWidth(); 2101 Align = Target->getShortFractAlign(); 2102 break; 2103 case BuiltinType::Fract: 2104 case BuiltinType::UFract: 2105 case BuiltinType::SatFract: 2106 case BuiltinType::SatUFract: 2107 Width = Target->getFractWidth(); 2108 Align = Target->getFractAlign(); 2109 break; 2110 case BuiltinType::LongFract: 2111 case BuiltinType::ULongFract: 2112 case BuiltinType::SatLongFract: 2113 case BuiltinType::SatULongFract: 2114 Width = Target->getLongFractWidth(); 2115 Align = Target->getLongFractAlign(); 2116 break; 2117 case BuiltinType::BFloat16: 2118 Width = Target->getBFloat16Width(); 2119 Align = Target->getBFloat16Align(); 2120 break; 2121 case BuiltinType::Float16: 2122 case BuiltinType::Half: 2123 if (Target->hasFloat16Type() || !getLangOpts().OpenMP || 2124 !getLangOpts().OpenMPIsDevice) { 2125 Width = Target->getHalfWidth(); 2126 Align = Target->getHalfAlign(); 2127 } else { 2128 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2129 "Expected OpenMP device compilation."); 2130 Width = AuxTarget->getHalfWidth(); 2131 Align = AuxTarget->getHalfAlign(); 2132 } 2133 break; 2134 case BuiltinType::Float: 2135 Width = Target->getFloatWidth(); 2136 Align = Target->getFloatAlign(); 2137 break; 2138 case BuiltinType::Double: 2139 Width = Target->getDoubleWidth(); 2140 Align = Target->getDoubleAlign(); 2141 break; 2142 case BuiltinType::Ibm128: 2143 Width = Target->getIbm128Width(); 2144 Align = Target->getIbm128Align(); 2145 break; 2146 case BuiltinType::LongDouble: 2147 if (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2148 (Target->getLongDoubleWidth() != AuxTarget->getLongDoubleWidth() || 2149 Target->getLongDoubleAlign() != AuxTarget->getLongDoubleAlign())) { 2150 Width = AuxTarget->getLongDoubleWidth(); 2151 Align = AuxTarget->getLongDoubleAlign(); 2152 } else { 2153 Width = Target->getLongDoubleWidth(); 2154 Align = Target->getLongDoubleAlign(); 2155 } 2156 break; 2157 case BuiltinType::Float128: 2158 if (Target->hasFloat128Type() || !getLangOpts().OpenMP || 2159 !getLangOpts().OpenMPIsDevice) { 2160 Width = Target->getFloat128Width(); 2161 Align = Target->getFloat128Align(); 2162 } else { 2163 assert(getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice && 2164 "Expected OpenMP device compilation."); 2165 Width = AuxTarget->getFloat128Width(); 2166 Align = AuxTarget->getFloat128Align(); 2167 } 2168 break; 2169 case BuiltinType::NullPtr: 2170 Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t) 2171 Align = Target->getPointerAlign(0); // == sizeof(void*) 2172 break; 2173 case BuiltinType::ObjCId: 2174 case BuiltinType::ObjCClass: 2175 case BuiltinType::ObjCSel: 2176 
Width = Target->getPointerWidth(0); 2177 Align = Target->getPointerAlign(0); 2178 break; 2179 case BuiltinType::OCLSampler: 2180 case BuiltinType::OCLEvent: 2181 case BuiltinType::OCLClkEvent: 2182 case BuiltinType::OCLQueue: 2183 case BuiltinType::OCLReserveID: 2184 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 2185 case BuiltinType::Id: 2186 #include "clang/Basic/OpenCLImageTypes.def" 2187 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 2188 case BuiltinType::Id: 2189 #include "clang/Basic/OpenCLExtensionTypes.def" 2190 AS = getTargetAddressSpace( 2191 Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T))); 2192 Width = Target->getPointerWidth(AS); 2193 Align = Target->getPointerAlign(AS); 2194 break; 2195 // The SVE types are effectively target-specific. The length of an 2196 // SVE_VECTOR_TYPE is only known at runtime, but it is always a multiple 2197 // of 128 bits. There is one predicate bit for each vector byte, so the 2198 // length of an SVE_PREDICATE_TYPE is always a multiple of 16 bits. 2199 // 2200 // Because the length is only known at runtime, we use a dummy value 2201 // of 0 for the static length. The alignment values are those defined 2202 // by the Procedure Call Standard for the Arm Architecture. 2203 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits, \ 2204 IsSigned, IsFP, IsBF) \ 2205 case BuiltinType::Id: \ 2206 Width = 0; \ 2207 Align = 128; \ 2208 break; 2209 #define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls) \ 2210 case BuiltinType::Id: \ 2211 Width = 0; \ 2212 Align = 16; \ 2213 break; 2214 #include "clang/Basic/AArch64SVEACLETypes.def" 2215 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 2216 case BuiltinType::Id: \ 2217 Width = Size; \ 2218 Align = Size; \ 2219 break; 2220 #include "clang/Basic/PPCTypes.def" 2221 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \ 2222 IsFP) \ 2223 case BuiltinType::Id: \ 2224 Width = 0; \ 2225 Align = ElBits; \ 2226 break; 2227 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \ 2228 case BuiltinType::Id: \ 2229 Width = 0; \ 2230 Align = 8; \ 2231 break; 2232 #include "clang/Basic/RISCVVTypes.def" 2233 } 2234 break; 2235 case Type::ObjCObjectPointer: 2236 Width = Target->getPointerWidth(0); 2237 Align = Target->getPointerAlign(0); 2238 break; 2239 case Type::BlockPointer: 2240 AS = getTargetAddressSpace(cast<BlockPointerType>(T)->getPointeeType()); 2241 Width = Target->getPointerWidth(AS); 2242 Align = Target->getPointerAlign(AS); 2243 break; 2244 case Type::LValueReference: 2245 case Type::RValueReference: 2246 // alignof and sizeof should never enter this code path here, so we go 2247 // the pointer route. 2248 AS = getTargetAddressSpace(cast<ReferenceType>(T)->getPointeeType()); 2249 Width = Target->getPointerWidth(AS); 2250 Align = Target->getPointerAlign(AS); 2251 break; 2252 case Type::Pointer: 2253 AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType()); 2254 Width = Target->getPointerWidth(AS); 2255 Align = Target->getPointerAlign(AS); 2256 break; 2257 case Type::MemberPointer: { 2258 const auto *MPT = cast<MemberPointerType>(T); 2259 CXXABI::MemberPointerInfo MPI = ABI->getMemberPointerInfo(MPT); 2260 Width = MPI.Width; 2261 Align = MPI.Align; 2262 break; 2263 } 2264 case Type::Complex: { 2265 // Complex types have the same alignment as their elements, but twice the 2266 // size. 
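// The .def includes used above (OpenCLImageTypes.def, AArch64SVEACLETypes.def,
// PPCTypes.def, RISCVVTypes.def) are X-macro lists: the same list is expanded
// repeatedly with a different macro definition each time, producing case labels
// here and singleton members elsewhere. A self-contained sketch of the idea,
// using a hypothetical list macro instead of a .def file:
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
#include <cstdio>

// One list, expanded twice with different handlers.
#define MY_BUILTIN_TYPES(X)                                                    \
  X(Half, 16)                                                                  \
  X(Float, 32)                                                                 \
  X(Double, 64)

enum class Kind {
#define HANDLE_TYPE(Name, Width) Name,
  MY_BUILTIN_TYPES(HANDLE_TYPE)
#undef HANDLE_TYPE
};

static unsigned widthOf(Kind K) {
  switch (K) {
#define HANDLE_TYPE(Name, Width)                                               \
  case Kind::Name:                                                             \
    return Width;
    MY_BUILTIN_TYPES(HANDLE_TYPE)
#undef HANDLE_TYPE
  }
  return 0;
}

int main() { std::printf("%u\n", widthOf(Kind::Double)); } // prints 64
#endif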
2267 TypeInfo EltInfo = getTypeInfo(cast<ComplexType>(T)->getElementType()); 2268 Width = EltInfo.Width * 2; 2269 Align = EltInfo.Align; 2270 break; 2271 } 2272 case Type::ObjCObject: 2273 return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr()); 2274 case Type::Adjusted: 2275 case Type::Decayed: 2276 return getTypeInfo(cast<AdjustedType>(T)->getAdjustedType().getTypePtr()); 2277 case Type::ObjCInterface: { 2278 const auto *ObjCI = cast<ObjCInterfaceType>(T); 2279 if (ObjCI->getDecl()->isInvalidDecl()) { 2280 Width = 8; 2281 Align = 8; 2282 break; 2283 } 2284 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2285 Width = toBits(Layout.getSize()); 2286 Align = toBits(Layout.getAlignment()); 2287 break; 2288 } 2289 case Type::ExtInt: { 2290 const auto *EIT = cast<ExtIntType>(T); 2291 Align = 2292 std::min(static_cast<unsigned>(std::max( 2293 getCharWidth(), llvm::PowerOf2Ceil(EIT->getNumBits()))), 2294 Target->getLongLongAlign()); 2295 Width = llvm::alignTo(EIT->getNumBits(), Align); 2296 break; 2297 } 2298 case Type::Record: 2299 case Type::Enum: { 2300 const auto *TT = cast<TagType>(T); 2301 2302 if (TT->getDecl()->isInvalidDecl()) { 2303 Width = 8; 2304 Align = 8; 2305 break; 2306 } 2307 2308 if (const auto *ET = dyn_cast<EnumType>(TT)) { 2309 const EnumDecl *ED = ET->getDecl(); 2310 TypeInfo Info = 2311 getTypeInfo(ED->getIntegerType()->getUnqualifiedDesugaredType()); 2312 if (unsigned AttrAlign = ED->getMaxAlignment()) { 2313 Info.Align = AttrAlign; 2314 Info.AlignRequirement = AlignRequirementKind::RequiredByEnum; 2315 } 2316 return Info; 2317 } 2318 2319 const auto *RT = cast<RecordType>(TT); 2320 const RecordDecl *RD = RT->getDecl(); 2321 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2322 Width = toBits(Layout.getSize()); 2323 Align = toBits(Layout.getAlignment()); 2324 AlignRequirement = RD->hasAttr<AlignedAttr>() 2325 ? AlignRequirementKind::RequiredByRecord 2326 : AlignRequirementKind::None; 2327 break; 2328 } 2329 2330 case Type::SubstTemplateTypeParm: 2331 return getTypeInfo(cast<SubstTemplateTypeParmType>(T)-> 2332 getReplacementType().getTypePtr()); 2333 2334 case Type::Auto: 2335 case Type::DeducedTemplateSpecialization: { 2336 const auto *A = cast<DeducedType>(T); 2337 assert(!A->getDeducedType().isNull() && 2338 "cannot request the size of an undeduced or dependent auto type"); 2339 return getTypeInfo(A->getDeducedType().getTypePtr()); 2340 } 2341 2342 case Type::Paren: 2343 return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr()); 2344 2345 case Type::MacroQualified: 2346 return getTypeInfo( 2347 cast<MacroQualifiedType>(T)->getUnderlyingType().getTypePtr()); 2348 2349 case Type::ObjCTypeParam: 2350 return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr()); 2351 2352 case Type::Typedef: { 2353 const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl(); 2354 TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr()); 2355 // If the typedef has an aligned attribute on it, it overrides any computed 2356 // alignment we have. This violates the GCC documentation (which says that 2357 // attribute(aligned) can only round up) but matches its implementation. 
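// Worked example of the ExtInt sizing rule above, assuming an 8-bit char and a
// 64-bit long long alignment (typical for x86-64): the alignment is the
// smallest power of two that holds the bit-width, capped at long long's, and
// the storage width is the bit-width rounded up to that alignment.
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  unsigned CharWidth = 8, LongLongAlign = 64;
  unsigned NumBits = 37;                          // e.g. _ExtInt(37)
  uint64_t Align = std::min<uint64_t>(
      std::max<uint64_t>(CharWidth, llvm::PowerOf2Ceil(NumBits)),
      LongLongAlign);                             // PowerOf2Ceil(37) == 64
  uint64_t Width = llvm::alignTo(NumBits, Align); // 37 -> 64
  assert(Align == 64 && Width == 64);             // stored in 8 bytes
}
#endif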
2358 if (unsigned AttrAlign = Typedef->getMaxAlignment()) { 2359 Align = AttrAlign; 2360 AlignRequirement = AlignRequirementKind::RequiredByTypedef; 2361 } else { 2362 Align = Info.Align; 2363 AlignRequirement = Info.AlignRequirement; 2364 } 2365 Width = Info.Width; 2366 break; 2367 } 2368 2369 case Type::Elaborated: 2370 return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr()); 2371 2372 case Type::Attributed: 2373 return getTypeInfo( 2374 cast<AttributedType>(T)->getEquivalentType().getTypePtr()); 2375 2376 case Type::Atomic: { 2377 // Start with the base type information. 2378 TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType()); 2379 Width = Info.Width; 2380 Align = Info.Align; 2381 2382 if (!Width) { 2383 // An otherwise zero-sized type should still generate an 2384 // atomic operation. 2385 Width = Target->getCharWidth(); 2386 assert(Align); 2387 } else if (Width <= Target->getMaxAtomicPromoteWidth()) { 2388 // If the size of the type doesn't exceed the platform's max 2389 // atomic promotion width, make the size and alignment more 2390 // favorable to atomic operations: 2391 2392 // Round the size up to a power of 2. 2393 if (!llvm::isPowerOf2_64(Width)) 2394 Width = llvm::NextPowerOf2(Width); 2395 2396 // Set the alignment equal to the size. 2397 Align = static_cast<unsigned>(Width); 2398 } 2399 } 2400 break; 2401 2402 case Type::Pipe: 2403 Width = Target->getPointerWidth(getTargetAddressSpace(LangAS::opencl_global)); 2404 Align = Target->getPointerAlign(getTargetAddressSpace(LangAS::opencl_global)); 2405 break; 2406 } 2407 2408 assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2"); 2409 return TypeInfo(Width, Align, AlignRequirement); 2410 } 2411 2412 unsigned ASTContext::getTypeUnadjustedAlign(const Type *T) const { 2413 UnadjustedAlignMap::iterator I = MemoizedUnadjustedAlign.find(T); 2414 if (I != MemoizedUnadjustedAlign.end()) 2415 return I->second; 2416 2417 unsigned UnadjustedAlign; 2418 if (const auto *RT = T->getAs<RecordType>()) { 2419 const RecordDecl *RD = RT->getDecl(); 2420 const ASTRecordLayout &Layout = getASTRecordLayout(RD); 2421 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment()); 2422 } else if (const auto *ObjCI = T->getAs<ObjCInterfaceType>()) { 2423 const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl()); 2424 UnadjustedAlign = toBits(Layout.getUnadjustedAlignment()); 2425 } else { 2426 UnadjustedAlign = getTypeAlign(T->getUnqualifiedDesugaredType()); 2427 } 2428 2429 MemoizedUnadjustedAlign[T] = UnadjustedAlign; 2430 return UnadjustedAlign; 2431 } 2432 2433 unsigned ASTContext::getOpenMPDefaultSimdAlign(QualType T) const { 2434 unsigned SimdAlign = getTargetInfo().getSimdDefaultAlign(); 2435 return SimdAlign; 2436 } 2437 2438 /// toCharUnitsFromBits - Convert a size in bits to a size in characters. 2439 CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const { 2440 return CharUnits::fromQuantity(BitSize / getCharWidth()); 2441 } 2442 2443 /// toBits - Convert a size in characters to a size in bits. 2444 int64_t ASTContext::toBits(CharUnits CharSize) const { 2445 return CharSize.getQuantity() * getCharWidth(); 2446 } 2447 2448 /// getTypeSizeInChars - Return the size of the specified type, in characters. 2449 /// This method does not work on incomplete types.
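// Worked example of the _Atomic promotion in the Atomic case above, assuming
// the payload fits within the target's MaxAtomicPromoteWidth: a 3-byte value is
// widened to the next power of two and its alignment is raised to match, so a
// native atomic instruction can operate on it.
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

int main() {
  uint64_t Width = 24;                  // e.g. an _Atomic 3-byte struct
  unsigned Align = 8;
  if (!llvm::isPowerOf2_64(Width))
    Width = llvm::NextPowerOf2(Width);  // 24 -> 32
  Align = static_cast<unsigned>(Width); // alignment tracks the promoted size
  assert(Width == 32 && Align == 32);
}
#endif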
2450 CharUnits ASTContext::getTypeSizeInChars(QualType T) const { 2451 return getTypeInfoInChars(T).Width; 2452 } 2453 CharUnits ASTContext::getTypeSizeInChars(const Type *T) const { 2454 return getTypeInfoInChars(T).Width; 2455 } 2456 2457 /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in 2458 /// characters. This method does not work on incomplete types. 2459 CharUnits ASTContext::getTypeAlignInChars(QualType T) const { 2460 return toCharUnitsFromBits(getTypeAlign(T)); 2461 } 2462 CharUnits ASTContext::getTypeAlignInChars(const Type *T) const { 2463 return toCharUnitsFromBits(getTypeAlign(T)); 2464 } 2465 2466 /// getTypeUnadjustedAlignInChars - Return the ABI-specified alignment of a 2467 /// type, in characters, before alignment adjustments. This method does 2468 /// not work on incomplete types. 2469 CharUnits ASTContext::getTypeUnadjustedAlignInChars(QualType T) const { 2470 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2471 } 2472 CharUnits ASTContext::getTypeUnadjustedAlignInChars(const Type *T) const { 2473 return toCharUnitsFromBits(getTypeUnadjustedAlign(T)); 2474 } 2475 2476 /// getPreferredTypeAlign - Return the "preferred" alignment of the specified 2477 /// type for the current target in bits. This can be different than the ABI 2478 /// alignment in cases where it is beneficial for performance or backwards 2479 /// compatibility preserving to overalign a data type. (Note: despite the name, 2480 /// the preferred alignment is ABI-impacting, and not an optimization.) 2481 unsigned ASTContext::getPreferredTypeAlign(const Type *T) const { 2482 TypeInfo TI = getTypeInfo(T); 2483 unsigned ABIAlign = TI.Align; 2484 2485 T = T->getBaseElementTypeUnsafe(); 2486 2487 // The preferred alignment of member pointers is that of a pointer. 2488 if (T->isMemberPointerType()) 2489 return getPreferredTypeAlign(getPointerDiffType().getTypePtr()); 2490 2491 if (!Target->allowsLargerPreferedTypeAlignment()) 2492 return ABIAlign; 2493 2494 if (const auto *RT = T->getAs<RecordType>()) { 2495 const RecordDecl *RD = RT->getDecl(); 2496 2497 // When used as part of a typedef, or together with a 'packed' attribute, 2498 // the 'aligned' attribute can be used to decrease alignment. Note that the 2499 // 'packed' case is already taken into consideration when computing the 2500 // alignment, we only need to handle the typedef case here. 2501 if (TI.AlignRequirement == AlignRequirementKind::RequiredByTypedef || 2502 RD->isInvalidDecl()) 2503 return ABIAlign; 2504 2505 unsigned PreferredAlign = static_cast<unsigned>( 2506 toBits(getASTRecordLayout(RD).PreferredAlignment)); 2507 assert(PreferredAlign >= ABIAlign && 2508 "PreferredAlign should be at least as large as ABIAlign."); 2509 return PreferredAlign; 2510 } 2511 2512 // Double (and, for targets supporting AIX `power` alignment, long double) and 2513 // long long should be naturally aligned (despite requiring less alignment) if 2514 // possible. 2515 if (const auto *CT = T->getAs<ComplexType>()) 2516 T = CT->getElementType().getTypePtr(); 2517 if (const auto *ET = T->getAs<EnumType>()) 2518 T = ET->getDecl()->getIntegerType().getTypePtr(); 2519 if (T->isSpecificBuiltinType(BuiltinType::Double) || 2520 T->isSpecificBuiltinType(BuiltinType::LongLong) || 2521 T->isSpecificBuiltinType(BuiltinType::ULongLong) || 2522 (T->isSpecificBuiltinType(BuiltinType::LongDouble) && 2523 Target->defaultsToAIXPowerAlignment())) 2524 // Don't increase the alignment if an alignment attribute was specified on a 2525 // typedef declaration.
2526 if (!TI.isAlignRequired()) 2527 return std::max(ABIAlign, (unsigned)getTypeSize(T)); 2528 2529 return ABIAlign; 2530 } 2531 2532 /// getTargetDefaultAlignForAttributeAligned - Return the default alignment 2533 /// for __attribute__((aligned)) on this target, to be used if no alignment 2534 /// value is specified. 2535 unsigned ASTContext::getTargetDefaultAlignForAttributeAligned() const { 2536 return getTargetInfo().getDefaultAlignForAttributeAligned(); 2537 } 2538 2539 /// getAlignOfGlobalVar - Return the alignment in bits that should be given 2540 /// to a global variable of the specified type. 2541 unsigned ASTContext::getAlignOfGlobalVar(QualType T) const { 2542 uint64_t TypeSize = getTypeSize(T.getTypePtr()); 2543 return std::max(getPreferredTypeAlign(T), 2544 getTargetInfo().getMinGlobalAlign(TypeSize)); 2545 } 2546 2547 /// getAlignOfGlobalVarInChars - Return the alignment in characters that 2548 /// should be given to a global variable of the specified type. 2549 CharUnits ASTContext::getAlignOfGlobalVarInChars(QualType T) const { 2550 return toCharUnitsFromBits(getAlignOfGlobalVar(T)); 2551 } 2552 2553 CharUnits ASTContext::getOffsetOfBaseWithVBPtr(const CXXRecordDecl *RD) const { 2554 CharUnits Offset = CharUnits::Zero(); 2555 const ASTRecordLayout *Layout = &getASTRecordLayout(RD); 2556 while (const CXXRecordDecl *Base = Layout->getBaseSharingVBPtr()) { 2557 Offset += Layout->getBaseClassOffset(Base); 2558 Layout = &getASTRecordLayout(Base); 2559 } 2560 return Offset; 2561 } 2562 2563 CharUnits ASTContext::getMemberPointerPathAdjustment(const APValue &MP) const { 2564 const ValueDecl *MPD = MP.getMemberPointerDecl(); 2565 CharUnits ThisAdjustment = CharUnits::Zero(); 2566 ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath(); 2567 bool DerivedMember = MP.isMemberPointerToDerivedMember(); 2568 const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext()); 2569 for (unsigned I = 0, N = Path.size(); I != N; ++I) { 2570 const CXXRecordDecl *Base = RD; 2571 const CXXRecordDecl *Derived = Path[I]; 2572 if (DerivedMember) 2573 std::swap(Base, Derived); 2574 ThisAdjustment += getASTRecordLayout(Derived).getBaseClassOffset(Base); 2575 RD = Path[I]; 2576 } 2577 if (DerivedMember) 2578 ThisAdjustment = -ThisAdjustment; 2579 return ThisAdjustment; 2580 } 2581 2582 /// DeepCollectObjCIvars - 2583 /// This routine first collects all declared, but not synthesized, ivars in 2584 /// super class and then collects all ivars, including those synthesized for 2585 /// current class. This routine is used for implementation of current class 2586 /// when all ivars, declared and synthesized are known. 2587 void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, 2588 bool leafClass, 2589 SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const { 2590 if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass()) 2591 DeepCollectObjCIvars(SuperClass, false, Ivars); 2592 if (!leafClass) { 2593 for (const auto *I : OI->ivars()) 2594 Ivars.push_back(I); 2595 } else { 2596 auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI); 2597 for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; 2598 Iv= Iv->getNextIvar()) 2599 Ivars.push_back(Iv); 2600 } 2601 } 2602 2603 /// CollectInheritedProtocols - Collect all protocols in current class and 2604 /// those inherited by it. 
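// Hypothetical helper contrasting the ABI alignment with the preferred
// alignment computed by getPreferredTypeAlign above; on 32-bit x86 Linux, for
// example, 'double' typically reports 32 bits of ABI alignment but 64 bits of
// preferred alignment (which is what GCC's __alignof__ returns).
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
static void dumpAlignments(const clang::ASTContext &Ctx, clang::QualType T) {
  unsigned ABIAlignInBits = Ctx.getTypeAlign(T);
  unsigned PreferredAlignInBits = Ctx.getPreferredTypeAlign(T.getTypePtr());
  llvm::errs() << "abi=" << ABIAlignInBits
               << " preferred=" << PreferredAlignInBits << "\n";
}
#endif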
2605 void ASTContext::CollectInheritedProtocols(const Decl *CDecl, 2606 llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) { 2607 if (const auto *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) { 2608 // We can use protocol_iterator here instead of 2609 // all_referenced_protocol_iterator since we are walking all categories. 2610 for (auto *Proto : OI->all_referenced_protocols()) { 2611 CollectInheritedProtocols(Proto, Protocols); 2612 } 2613 2614 // Categories of this Interface. 2615 for (const auto *Cat : OI->visible_categories()) 2616 CollectInheritedProtocols(Cat, Protocols); 2617 2618 if (ObjCInterfaceDecl *SD = OI->getSuperClass()) 2619 while (SD) { 2620 CollectInheritedProtocols(SD, Protocols); 2621 SD = SD->getSuperClass(); 2622 } 2623 } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) { 2624 for (auto *Proto : OC->protocols()) { 2625 CollectInheritedProtocols(Proto, Protocols); 2626 } 2627 } else if (const auto *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) { 2628 // Insert the protocol. 2629 if (!Protocols.insert( 2630 const_cast<ObjCProtocolDecl *>(OP->getCanonicalDecl())).second) 2631 return; 2632 2633 for (auto *Proto : OP->protocols()) 2634 CollectInheritedProtocols(Proto, Protocols); 2635 } 2636 } 2637 2638 static bool unionHasUniqueObjectRepresentations(const ASTContext &Context, 2639 const RecordDecl *RD) { 2640 assert(RD->isUnion() && "Must be union type"); 2641 CharUnits UnionSize = Context.getTypeSizeInChars(RD->getTypeForDecl()); 2642 2643 for (const auto *Field : RD->fields()) { 2644 if (!Context.hasUniqueObjectRepresentations(Field->getType())) 2645 return false; 2646 CharUnits FieldSize = Context.getTypeSizeInChars(Field->getType()); 2647 if (FieldSize != UnionSize) 2648 return false; 2649 } 2650 return !RD->field_empty(); 2651 } 2652 2653 static int64_t getSubobjectOffset(const FieldDecl *Field, 2654 const ASTContext &Context, 2655 const clang::ASTRecordLayout & /*Layout*/) { 2656 return Context.getFieldOffset(Field); 2657 } 2658 2659 static int64_t getSubobjectOffset(const CXXRecordDecl *RD, 2660 const ASTContext &Context, 2661 const clang::ASTRecordLayout &Layout) { 2662 return Context.toBits(Layout.getBaseClassOffset(RD)); 2663 } 2664 2665 static llvm::Optional<int64_t> 2666 structHasUniqueObjectRepresentations(const ASTContext &Context, 2667 const RecordDecl *RD); 2668 2669 static llvm::Optional<int64_t> 2670 getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) { 2671 if (Field->getType()->isRecordType()) { 2672 const RecordDecl *RD = Field->getType()->getAsRecordDecl(); 2673 if (!RD->isUnion()) 2674 return structHasUniqueObjectRepresentations(Context, RD); 2675 } 2676 if (!Field->getType()->isReferenceType() && 2677 !Context.hasUniqueObjectRepresentations(Field->getType())) 2678 return llvm::None; 2679 2680 int64_t FieldSizeInBits = 2681 Context.toBits(Context.getTypeSizeInChars(Field->getType())); 2682 if (Field->isBitField()) { 2683 int64_t BitfieldSize = Field->getBitWidthValue(Context); 2684 if (BitfieldSize > FieldSizeInBits) 2685 return llvm::None; 2686 FieldSizeInBits = BitfieldSize; 2687 } 2688 return FieldSizeInBits; 2689 } 2690 2691 static llvm::Optional<int64_t> 2692 getSubobjectSizeInBits(const CXXRecordDecl *RD, const ASTContext &Context) { 2693 return structHasUniqueObjectRepresentations(Context, RD); 2694 } 2695 2696 template <typename RangeT> 2697 static llvm::Optional<int64_t> structSubobjectsHaveUniqueObjectRepresentations( 2698 const RangeT &Subobjects, int64_t CurOffsetInBits, 2699 const ASTContext &Context, const 
clang::ASTRecordLayout &Layout) { 2700 for (const auto *Subobject : Subobjects) { 2701 llvm::Optional<int64_t> SizeInBits = 2702 getSubobjectSizeInBits(Subobject, Context); 2703 if (!SizeInBits) 2704 return llvm::None; 2705 if (*SizeInBits != 0) { 2706 int64_t Offset = getSubobjectOffset(Subobject, Context, Layout); 2707 if (Offset != CurOffsetInBits) 2708 return llvm::None; 2709 CurOffsetInBits += *SizeInBits; 2710 } 2711 } 2712 return CurOffsetInBits; 2713 } 2714 2715 static llvm::Optional<int64_t> 2716 structHasUniqueObjectRepresentations(const ASTContext &Context, 2717 const RecordDecl *RD) { 2718 assert(!RD->isUnion() && "Must be struct/class type"); 2719 const auto &Layout = Context.getASTRecordLayout(RD); 2720 2721 int64_t CurOffsetInBits = 0; 2722 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RD)) { 2723 if (ClassDecl->isDynamicClass()) 2724 return llvm::None; 2725 2726 SmallVector<CXXRecordDecl *, 4> Bases; 2727 for (const auto &Base : ClassDecl->bases()) { 2728 // Empty types can be inherited from, and non-empty types can potentially 2729 // have tail padding, so just make sure there isn't an error. 2730 Bases.emplace_back(Base.getType()->getAsCXXRecordDecl()); 2731 } 2732 2733 llvm::sort(Bases, [&](const CXXRecordDecl *L, const CXXRecordDecl *R) { 2734 return Layout.getBaseClassOffset(L) < Layout.getBaseClassOffset(R); 2735 }); 2736 2737 llvm::Optional<int64_t> OffsetAfterBases = 2738 structSubobjectsHaveUniqueObjectRepresentations(Bases, CurOffsetInBits, 2739 Context, Layout); 2740 if (!OffsetAfterBases) 2741 return llvm::None; 2742 CurOffsetInBits = *OffsetAfterBases; 2743 } 2744 2745 llvm::Optional<int64_t> OffsetAfterFields = 2746 structSubobjectsHaveUniqueObjectRepresentations( 2747 RD->fields(), CurOffsetInBits, Context, Layout); 2748 if (!OffsetAfterFields) 2749 return llvm::None; 2750 CurOffsetInBits = *OffsetAfterFields; 2751 2752 return CurOffsetInBits; 2753 } 2754 2755 bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const { 2756 // C++17 [meta.unary.prop]: 2757 // The predicate condition for a template specialization 2758 // has_unique_object_representations<T> shall be 2759 // satisfied if and only if: 2760 // (9.1) - T is trivially copyable, and 2761 // (9.2) - any two objects of type T with the same value have the same 2762 // object representation, where two objects 2763 // of array or non-union class type are considered to have the same value 2764 // if their respective sequences of 2765 // direct subobjects have the same values, and two objects of union type 2766 // are considered to have the same 2767 // value if they have the same active member and the corresponding members 2768 // have the same value. 2769 // The set of scalar types for which this condition holds is 2770 // implementation-defined. [ Note: If a type has padding 2771 // bits, the condition does not hold; otherwise, the condition holds true 2772 // for unsigned integral types. -- end note ] 2773 assert(!Ty.isNull() && "Null QualType sent to unique object rep check"); 2774 2775 // Arrays are unique only if their element type is unique. 2776 if (Ty->isArrayType()) 2777 return hasUniqueObjectRepresentations(getBaseElementType(Ty)); 2778 2779 // (9.1) - T is trivially copyable... 2780 if (!Ty.isTriviallyCopyableType(*this)) 2781 return false; 2782 2783 // All integrals and enums are unique. 2784 if (Ty->isIntegralOrEnumerationType()) 2785 return true; 2786 2787 // All other pointers are unique. 
2788 if (Ty->isPointerType()) 2789 return true; 2790 2791 if (Ty->isMemberPointerType()) { 2792 const auto *MPT = Ty->getAs<MemberPointerType>(); 2793 return !ABI->getMemberPointerInfo(MPT).HasPadding; 2794 } 2795 2796 if (Ty->isRecordType()) { 2797 const RecordDecl *Record = Ty->castAs<RecordType>()->getDecl(); 2798 2799 if (Record->isInvalidDecl()) 2800 return false; 2801 2802 if (Record->isUnion()) 2803 return unionHasUniqueObjectRepresentations(*this, Record); 2804 2805 Optional<int64_t> StructSize = 2806 structHasUniqueObjectRepresentations(*this, Record); 2807 2808 return StructSize && 2809 StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty)); 2810 } 2811 2812 // FIXME: More cases to handle here (list by rsmith): 2813 // vectors (careful about, eg, vector of 3 foo) 2814 // _Complex int and friends 2815 // _Atomic T 2816 // Obj-C block pointers 2817 // Obj-C object pointers 2818 // and perhaps OpenCL's various builtin types (pipe, sampler_t, event_t, 2819 // clk_event_t, queue_t, reserve_id_t) 2820 // There're also Obj-C class types and the Obj-C selector type, but I think it 2821 // makes sense for those to return false here. 2822 2823 return false; 2824 } 2825 2826 unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const { 2827 unsigned count = 0; 2828 // Count ivars declared in class extension. 2829 for (const auto *Ext : OI->known_extensions()) 2830 count += Ext->ivar_size(); 2831 2832 // Count ivar defined in this class's implementation. This 2833 // includes synthesized ivars. 2834 if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) 2835 count += ImplDecl->ivar_size(); 2836 2837 return count; 2838 } 2839 2840 bool ASTContext::isSentinelNullExpr(const Expr *E) { 2841 if (!E) 2842 return false; 2843 2844 // nullptr_t is always treated as null. 2845 if (E->getType()->isNullPtrType()) return true; 2846 2847 if (E->getType()->isAnyPointerType() && 2848 E->IgnoreParenCasts()->isNullPointerConstant(*this, 2849 Expr::NPC_ValueDependentIsNull)) 2850 return true; 2851 2852 // Unfortunately, __null has type 'int'. 2853 if (isa<GNUNullExpr>(E)) return true; 2854 2855 return false; 2856 } 2857 2858 /// Get the implementation of ObjCInterfaceDecl, or nullptr if none 2859 /// exists. 2860 ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) { 2861 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2862 I = ObjCImpls.find(D); 2863 if (I != ObjCImpls.end()) 2864 return cast<ObjCImplementationDecl>(I->second); 2865 return nullptr; 2866 } 2867 2868 /// Get the implementation of ObjCCategoryDecl, or nullptr if none 2869 /// exists. 2870 ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) { 2871 llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator 2872 I = ObjCImpls.find(D); 2873 if (I != ObjCImpls.end()) 2874 return cast<ObjCCategoryImplDecl>(I->second); 2875 return nullptr; 2876 } 2877 2878 /// Set the implementation of ObjCInterfaceDecl. 2879 void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD, 2880 ObjCImplementationDecl *ImplD) { 2881 assert(IFaceD && ImplD && "Passed null params"); 2882 ObjCImpls[IFaceD] = ImplD; 2883 } 2884 2885 /// Set the implementation of ObjCCategoryDecl. 
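// hasUniqueObjectRepresentations above is what backs Clang's
// __has_unique_object_representations builtin, and hence the C++17 library
// trait. A standalone illustration of the user-visible behaviour on a typical
// 32/64-bit ABI, where internal padding defeats the property (the exact result
// is implementation-defined):
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
#include <type_traits>

struct TightlyPacked { int a; int b; };  // no internal padding on common ABIs
struct WithPadding   { char c; int i; }; // usually 3 padding bytes after 'c'

static_assert(std::has_unique_object_representations_v<TightlyPacked>, "");
static_assert(!std::has_unique_object_representations_v<WithPadding>, "");
#endif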
2886 void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD, 2887 ObjCCategoryImplDecl *ImplD) { 2888 assert(CatD && ImplD && "Passed null params"); 2889 ObjCImpls[CatD] = ImplD; 2890 } 2891 2892 const ObjCMethodDecl * 2893 ASTContext::getObjCMethodRedeclaration(const ObjCMethodDecl *MD) const { 2894 return ObjCMethodRedecls.lookup(MD); 2895 } 2896 2897 void ASTContext::setObjCMethodRedeclaration(const ObjCMethodDecl *MD, 2898 const ObjCMethodDecl *Redecl) { 2899 assert(!getObjCMethodRedeclaration(MD) && "MD already has a redeclaration"); 2900 ObjCMethodRedecls[MD] = Redecl; 2901 } 2902 2903 const ObjCInterfaceDecl *ASTContext::getObjContainingInterface( 2904 const NamedDecl *ND) const { 2905 if (const auto *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext())) 2906 return ID; 2907 if (const auto *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext())) 2908 return CD->getClassInterface(); 2909 if (const auto *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext())) 2910 return IMD->getClassInterface(); 2911 2912 return nullptr; 2913 } 2914 2915 /// Get the copy initialization expression of VarDecl, or nullptr if 2916 /// none exists. 2917 BlockVarCopyInit ASTContext::getBlockVarCopyInit(const VarDecl *VD) const { 2918 assert(VD && "Passed null params"); 2919 assert(VD->hasAttr<BlocksAttr>() && 2920 "getBlockVarCopyInits - not __block var"); 2921 auto I = BlockVarCopyInits.find(VD); 2922 if (I != BlockVarCopyInits.end()) 2923 return I->second; 2924 return {nullptr, false}; 2925 } 2926 2927 /// Set the copy initialization expression of a block var decl. 2928 void ASTContext::setBlockVarCopyInit(const VarDecl*VD, Expr *CopyExpr, 2929 bool CanThrow) { 2930 assert(VD && CopyExpr && "Passed null params"); 2931 assert(VD->hasAttr<BlocksAttr>() && 2932 "setBlockVarCopyInits - not __block var"); 2933 BlockVarCopyInits[VD].setExprAndFlag(CopyExpr, CanThrow); 2934 } 2935 2936 TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T, 2937 unsigned DataSize) const { 2938 if (!DataSize) 2939 DataSize = TypeLoc::getFullDataSizeForType(T); 2940 else 2941 assert(DataSize == TypeLoc::getFullDataSizeForType(T) && 2942 "incorrect data size provided to CreateTypeSourceInfo!"); 2943 2944 auto *TInfo = 2945 (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8); 2946 new (TInfo) TypeSourceInfo(T); 2947 return TInfo; 2948 } 2949 2950 TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T, 2951 SourceLocation L) const { 2952 TypeSourceInfo *DI = CreateTypeSourceInfo(T); 2953 DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L); 2954 return DI; 2955 } 2956 2957 const ASTRecordLayout & 2958 ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const { 2959 return getObjCLayout(D, nullptr); 2960 } 2961 2962 const ASTRecordLayout & 2963 ASTContext::getASTObjCImplementationLayout( 2964 const ObjCImplementationDecl *D) const { 2965 return getObjCLayout(D->getClassInterface(), D); 2966 } 2967 2968 //===----------------------------------------------------------------------===// 2969 // Type creation/memoization methods 2970 //===----------------------------------------------------------------------===// 2971 2972 QualType 2973 ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const { 2974 unsigned fastQuals = quals.getFastQualifiers(); 2975 quals.removeFastQualifiers(); 2976 2977 // Check if we've already instantiated this type. 
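// The code just below (and every get*Type method that follows) uses the same
// three-step FoldingSet idiom: Profile a key, FindNodeOrInsertPos to look it up
// while remembering where a new node would go, and InsertNode to publish a
// freshly built node at that spot, so each distinct type is created exactly
// once. A standalone sketch with a hypothetical node type:
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
#include "llvm/ADT/FoldingSet.h"

struct UniquedInt : llvm::FoldingSetNode {
  int Value;
  explicit UniquedInt(int V) : Value(V) {}
  void Profile(llvm::FoldingSetNodeID &ID) { ID.AddInteger(Value); }
};

static UniquedInt *getUniquedInt(llvm::FoldingSet<UniquedInt> &Set, int V) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(V);                // must match what Profile() would add
  void *InsertPos = nullptr;
  if (UniquedInt *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;               // canonical node already exists
  auto *New = new UniquedInt(V);   // ASTContext would arena-allocate instead
  Set.InsertNode(New, InsertPos);  // publish it at the remembered position
  return New;
}
#endif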
2978 llvm::FoldingSetNodeID ID; 2979 ExtQuals::Profile(ID, baseType, quals); 2980 void *insertPos = nullptr; 2981 if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) { 2982 assert(eq->getQualifiers() == quals); 2983 return QualType(eq, fastQuals); 2984 } 2985 2986 // If the base type is not canonical, make the appropriate canonical type. 2987 QualType canon; 2988 if (!baseType->isCanonicalUnqualified()) { 2989 SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split(); 2990 canonSplit.Quals.addConsistentQualifiers(quals); 2991 canon = getExtQualType(canonSplit.Ty, canonSplit.Quals); 2992 2993 // Re-find the insert position. 2994 (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos); 2995 } 2996 2997 auto *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals); 2998 ExtQualNodes.InsertNode(eq, insertPos); 2999 return QualType(eq, fastQuals); 3000 } 3001 3002 QualType ASTContext::getAddrSpaceQualType(QualType T, 3003 LangAS AddressSpace) const { 3004 QualType CanT = getCanonicalType(T); 3005 if (CanT.getAddressSpace() == AddressSpace) 3006 return T; 3007 3008 // If we are composing extended qualifiers together, merge together 3009 // into one ExtQuals node. 3010 QualifierCollector Quals; 3011 const Type *TypeNode = Quals.strip(T); 3012 3013 // If this type already has an address space specified, it cannot get 3014 // another one. 3015 assert(!Quals.hasAddressSpace() && 3016 "Type cannot be in multiple addr spaces!"); 3017 Quals.addAddressSpace(AddressSpace); 3018 3019 return getExtQualType(TypeNode, Quals); 3020 } 3021 3022 QualType ASTContext::removeAddrSpaceQualType(QualType T) const { 3023 // If the type is not qualified with an address space, just return it 3024 // immediately. 3025 if (!T.hasAddressSpace()) 3026 return T; 3027 3028 // If we are composing extended qualifiers together, merge together 3029 // into one ExtQuals node. 3030 QualifierCollector Quals; 3031 const Type *TypeNode; 3032 3033 while (T.hasAddressSpace()) { 3034 TypeNode = Quals.strip(T); 3035 3036 // If the type no longer has an address space after stripping qualifiers, 3037 // jump out. 3038 if (!QualType(TypeNode, 0).hasAddressSpace()) 3039 break; 3040 3041 // There might be sugar in the way. Strip it and try again. 3042 T = T.getSingleStepDesugaredType(*this); 3043 } 3044 3045 Quals.removeAddressSpace(); 3046 3047 // Removal of the address space can mean there are no longer any 3048 // non-fast qualifiers, so creating an ExtQualType isn't possible (asserts) 3049 // or required. 3050 if (Quals.hasNonFastQualifiers()) 3051 return getExtQualType(TypeNode, Quals); 3052 else 3053 return QualType(TypeNode, Quals.getFastQualifiers()); 3054 } 3055 3056 QualType ASTContext::getObjCGCQualType(QualType T, 3057 Qualifiers::GC GCAttr) const { 3058 QualType CanT = getCanonicalType(T); 3059 if (CanT.getObjCGCAttr() == GCAttr) 3060 return T; 3061 3062 if (const auto *ptr = T->getAs<PointerType>()) { 3063 QualType Pointee = ptr->getPointeeType(); 3064 if (Pointee->isAnyPointerType()) { 3065 QualType ResultType = getObjCGCQualType(Pointee, GCAttr); 3066 return getPointerType(ResultType); 3067 } 3068 } 3069 3070 // If we are composing extended qualifiers together, merge together 3071 // into one ExtQuals node. 3072 QualifierCollector Quals; 3073 const Type *TypeNode = Quals.strip(T); 3074 3075 // If this type already has an ObjCGC specified, it cannot get 3076 // another one. 
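// Hypothetical round trip through the two address-space helpers above: qualify
// a type that carries no address space yet (getAddrSpaceQualType asserts
// otherwise), then strip the qualifier again; any sugar encountered on the way
// is desugared by removeAddrSpaceQualType.
#if 0 // Illustrative, self-contained sketch; not part of the surrounding implementation.
static bool roundTripsAddressSpace(clang::ASTContext &Ctx, clang::QualType T) {
  clang::QualType Qualified =
      Ctx.getAddrSpaceQualType(T, clang::LangAS::opencl_global);
  clang::QualType Stripped = Ctx.removeAddrSpaceQualType(Qualified);
  return Ctx.hasSameType(Stripped, T); // compares canonical types
}
#endif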
3077 assert(!Quals.hasObjCGCAttr() && 3078 "Type cannot have multiple ObjCGCs!"); 3079 Quals.addObjCGCAttr(GCAttr); 3080 3081 return getExtQualType(TypeNode, Quals); 3082 } 3083 3084 QualType ASTContext::removePtrSizeAddrSpace(QualType T) const { 3085 if (const PointerType *Ptr = T->getAs<PointerType>()) { 3086 QualType Pointee = Ptr->getPointeeType(); 3087 if (isPtrSizeAddressSpace(Pointee.getAddressSpace())) { 3088 return getPointerType(removeAddrSpaceQualType(Pointee)); 3089 } 3090 } 3091 return T; 3092 } 3093 3094 const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T, 3095 FunctionType::ExtInfo Info) { 3096 if (T->getExtInfo() == Info) 3097 return T; 3098 3099 QualType Result; 3100 if (const auto *FNPT = dyn_cast<FunctionNoProtoType>(T)) { 3101 Result = getFunctionNoProtoType(FNPT->getReturnType(), Info); 3102 } else { 3103 const auto *FPT = cast<FunctionProtoType>(T); 3104 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3105 EPI.ExtInfo = Info; 3106 Result = getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI); 3107 } 3108 3109 return cast<FunctionType>(Result.getTypePtr()); 3110 } 3111 3112 void ASTContext::adjustDeducedFunctionResultType(FunctionDecl *FD, 3113 QualType ResultType) { 3114 FD = FD->getMostRecentDecl(); 3115 while (true) { 3116 const auto *FPT = FD->getType()->castAs<FunctionProtoType>(); 3117 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 3118 FD->setType(getFunctionType(ResultType, FPT->getParamTypes(), EPI)); 3119 if (FunctionDecl *Next = FD->getPreviousDecl()) 3120 FD = Next; 3121 else 3122 break; 3123 } 3124 if (ASTMutationListener *L = getASTMutationListener()) 3125 L->DeducedReturnType(FD, ResultType); 3126 } 3127 3128 /// Get a function type and produce the equivalent function type with the 3129 /// specified exception specification. Type sugar that can be present on a 3130 /// declaration of a function with an exception specification is permitted 3131 /// and preserved. Other type sugar (for instance, typedefs) is not. 3132 QualType ASTContext::getFunctionTypeWithExceptionSpec( 3133 QualType Orig, const FunctionProtoType::ExceptionSpecInfo &ESI) { 3134 // Might have some parens. 3135 if (const auto *PT = dyn_cast<ParenType>(Orig)) 3136 return getParenType( 3137 getFunctionTypeWithExceptionSpec(PT->getInnerType(), ESI)); 3138 3139 // Might be wrapped in a macro qualified type. 3140 if (const auto *MQT = dyn_cast<MacroQualifiedType>(Orig)) 3141 return getMacroQualifiedType( 3142 getFunctionTypeWithExceptionSpec(MQT->getUnderlyingType(), ESI), 3143 MQT->getMacroIdentifier()); 3144 3145 // Might have a calling-convention attribute. 3146 if (const auto *AT = dyn_cast<AttributedType>(Orig)) 3147 return getAttributedType( 3148 AT->getAttrKind(), 3149 getFunctionTypeWithExceptionSpec(AT->getModifiedType(), ESI), 3150 getFunctionTypeWithExceptionSpec(AT->getEquivalentType(), ESI)); 3151 3152 // Anything else must be a function type. Rebuild it with the new exception 3153 // specification. 
3154 const auto *Proto = Orig->castAs<FunctionProtoType>(); 3155 return getFunctionType( 3156 Proto->getReturnType(), Proto->getParamTypes(), 3157 Proto->getExtProtoInfo().withExceptionSpec(ESI)); 3158 } 3159 3160 bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T, 3161 QualType U) { 3162 return hasSameType(T, U) || 3163 (getLangOpts().CPlusPlus17 && 3164 hasSameType(getFunctionTypeWithExceptionSpec(T, EST_None), 3165 getFunctionTypeWithExceptionSpec(U, EST_None))); 3166 } 3167 3168 QualType ASTContext::getFunctionTypeWithoutPtrSizes(QualType T) { 3169 if (const auto *Proto = T->getAs<FunctionProtoType>()) { 3170 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3171 SmallVector<QualType, 16> Args(Proto->param_types()); 3172 for (unsigned i = 0, n = Args.size(); i != n; ++i) 3173 Args[i] = removePtrSizeAddrSpace(Args[i]); 3174 return getFunctionType(RetTy, Args, Proto->getExtProtoInfo()); 3175 } 3176 3177 if (const FunctionNoProtoType *Proto = T->getAs<FunctionNoProtoType>()) { 3178 QualType RetTy = removePtrSizeAddrSpace(Proto->getReturnType()); 3179 return getFunctionNoProtoType(RetTy, Proto->getExtInfo()); 3180 } 3181 3182 return T; 3183 } 3184 3185 bool ASTContext::hasSameFunctionTypeIgnoringPtrSizes(QualType T, QualType U) { 3186 return hasSameType(T, U) || 3187 hasSameType(getFunctionTypeWithoutPtrSizes(T), 3188 getFunctionTypeWithoutPtrSizes(U)); 3189 } 3190 3191 void ASTContext::adjustExceptionSpec( 3192 FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI, 3193 bool AsWritten) { 3194 // Update the type. 3195 QualType Updated = 3196 getFunctionTypeWithExceptionSpec(FD->getType(), ESI); 3197 FD->setType(Updated); 3198 3199 if (!AsWritten) 3200 return; 3201 3202 // Update the type in the type source information too. 3203 if (TypeSourceInfo *TSInfo = FD->getTypeSourceInfo()) { 3204 // If the type and the type-as-written differ, we may need to update 3205 // the type-as-written too. 3206 if (TSInfo->getType() != FD->getType()) 3207 Updated = getFunctionTypeWithExceptionSpec(TSInfo->getType(), ESI); 3208 3209 // FIXME: When we get proper type location information for exceptions, 3210 // we'll also have to rebuild the TypeSourceInfo. For now, we just patch 3211 // up the TypeSourceInfo; 3212 assert(TypeLoc::getFullDataSizeForType(Updated) == 3213 TypeLoc::getFullDataSizeForType(TSInfo->getType()) && 3214 "TypeLoc size mismatch from updating exception specification"); 3215 TSInfo->overrideType(Updated); 3216 } 3217 } 3218 3219 /// getComplexType - Return the uniqued reference to the type for a complex 3220 /// number with the specified element type. 3221 QualType ASTContext::getComplexType(QualType T) const { 3222 // Unique pointers, to guarantee there is only one pointer of a particular 3223 // structure. 3224 llvm::FoldingSetNodeID ID; 3225 ComplexType::Profile(ID, T); 3226 3227 void *InsertPos = nullptr; 3228 if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos)) 3229 return QualType(CT, 0); 3230 3231 // If the pointee type isn't canonical, this won't be a canonical type either, 3232 // so fill in the canonical type field. 3233 QualType Canonical; 3234 if (!T.isCanonical()) { 3235 Canonical = getComplexType(getCanonicalType(T)); 3236 3237 // Get the new insert position for the node we care about. 
3238 ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos); 3239 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3240 } 3241 auto *New = new (*this, TypeAlignment) ComplexType(T, Canonical); 3242 Types.push_back(New); 3243 ComplexTypes.InsertNode(New, InsertPos); 3244 return QualType(New, 0); 3245 } 3246 3247 /// getPointerType - Return the uniqued reference to the type for a pointer to 3248 /// the specified type. 3249 QualType ASTContext::getPointerType(QualType T) const { 3250 // Unique pointers, to guarantee there is only one pointer of a particular 3251 // structure. 3252 llvm::FoldingSetNodeID ID; 3253 PointerType::Profile(ID, T); 3254 3255 void *InsertPos = nullptr; 3256 if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3257 return QualType(PT, 0); 3258 3259 // If the pointee type isn't canonical, this won't be a canonical type either, 3260 // so fill in the canonical type field. 3261 QualType Canonical; 3262 if (!T.isCanonical()) { 3263 Canonical = getPointerType(getCanonicalType(T)); 3264 3265 // Get the new insert position for the node we care about. 3266 PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3267 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3268 } 3269 auto *New = new (*this, TypeAlignment) PointerType(T, Canonical); 3270 Types.push_back(New); 3271 PointerTypes.InsertNode(New, InsertPos); 3272 return QualType(New, 0); 3273 } 3274 3275 QualType ASTContext::getAdjustedType(QualType Orig, QualType New) const { 3276 llvm::FoldingSetNodeID ID; 3277 AdjustedType::Profile(ID, Orig, New); 3278 void *InsertPos = nullptr; 3279 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3280 if (AT) 3281 return QualType(AT, 0); 3282 3283 QualType Canonical = getCanonicalType(New); 3284 3285 // Get the new insert position for the node we care about. 3286 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3287 assert(!AT && "Shouldn't be in the map!"); 3288 3289 AT = new (*this, TypeAlignment) 3290 AdjustedType(Type::Adjusted, Orig, New, Canonical); 3291 Types.push_back(AT); 3292 AdjustedTypes.InsertNode(AT, InsertPos); 3293 return QualType(AT, 0); 3294 } 3295 3296 QualType ASTContext::getDecayedType(QualType T) const { 3297 assert((T->isArrayType() || T->isFunctionType()) && "T does not decay"); 3298 3299 QualType Decayed; 3300 3301 // C99 6.7.5.3p7: 3302 // A declaration of a parameter as "array of type" shall be 3303 // adjusted to "qualified pointer to type", where the type 3304 // qualifiers (if any) are those specified within the [ and ] of 3305 // the array type derivation. 3306 if (T->isArrayType()) 3307 Decayed = getArrayDecayedType(T); 3308 3309 // C99 6.7.5.3p8: 3310 // A declaration of a parameter as "function returning type" 3311 // shall be adjusted to "pointer to function returning type", as 3312 // in 6.3.2.1. 3313 if (T->isFunctionType()) 3314 Decayed = getPointerType(T); 3315 3316 llvm::FoldingSetNodeID ID; 3317 AdjustedType::Profile(ID, T, Decayed); 3318 void *InsertPos = nullptr; 3319 AdjustedType *AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3320 if (AT) 3321 return QualType(AT, 0); 3322 3323 QualType Canonical = getCanonicalType(Decayed); 3324 3325 // Get the new insert position for the node we care about. 
3326 AT = AdjustedTypes.FindNodeOrInsertPos(ID, InsertPos); 3327 assert(!AT && "Shouldn't be in the map!"); 3328 3329 AT = new (*this, TypeAlignment) DecayedType(T, Decayed, Canonical); 3330 Types.push_back(AT); 3331 AdjustedTypes.InsertNode(AT, InsertPos); 3332 return QualType(AT, 0); 3333 } 3334 3335 /// getBlockPointerType - Return the uniqued reference to the type for 3336 /// a pointer to the specified block. 3337 QualType ASTContext::getBlockPointerType(QualType T) const { 3338 assert(T->isFunctionType() && "block of function types only"); 3339 // Unique pointers, to guarantee there is only one block of a particular 3340 // structure. 3341 llvm::FoldingSetNodeID ID; 3342 BlockPointerType::Profile(ID, T); 3343 3344 void *InsertPos = nullptr; 3345 if (BlockPointerType *PT = 3346 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3347 return QualType(PT, 0); 3348 3349 // If the block pointee type isn't canonical, this won't be a canonical 3350 // type either so fill in the canonical type field. 3351 QualType Canonical; 3352 if (!T.isCanonical()) { 3353 Canonical = getBlockPointerType(getCanonicalType(T)); 3354 3355 // Get the new insert position for the node we care about. 3356 BlockPointerType *NewIP = 3357 BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3358 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3359 } 3360 auto *New = new (*this, TypeAlignment) BlockPointerType(T, Canonical); 3361 Types.push_back(New); 3362 BlockPointerTypes.InsertNode(New, InsertPos); 3363 return QualType(New, 0); 3364 } 3365 3366 /// getLValueReferenceType - Return the uniqued reference to the type for an 3367 /// lvalue reference to the specified type. 3368 QualType 3369 ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const { 3370 assert(getCanonicalType(T) != OverloadTy && 3371 "Unresolved overloaded function type"); 3372 3373 // Unique pointers, to guarantee there is only one pointer of a particular 3374 // structure. 3375 llvm::FoldingSetNodeID ID; 3376 ReferenceType::Profile(ID, T, SpelledAsLValue); 3377 3378 void *InsertPos = nullptr; 3379 if (LValueReferenceType *RT = 3380 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3381 return QualType(RT, 0); 3382 3383 const auto *InnerRef = T->getAs<ReferenceType>(); 3384 3385 // If the referencee type isn't canonical, this won't be a canonical type 3386 // either, so fill in the canonical type field. 3387 QualType Canonical; 3388 if (!SpelledAsLValue || InnerRef || !T.isCanonical()) { 3389 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3390 Canonical = getLValueReferenceType(getCanonicalType(PointeeType)); 3391 3392 // Get the new insert position for the node we care about. 3393 LValueReferenceType *NewIP = 3394 LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3395 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3396 } 3397 3398 auto *New = new (*this, TypeAlignment) LValueReferenceType(T, Canonical, 3399 SpelledAsLValue); 3400 Types.push_back(New); 3401 LValueReferenceTypes.InsertNode(New, InsertPos); 3402 3403 return QualType(New, 0); 3404 } 3405 3406 /// getRValueReferenceType - Return the uniqued reference to the type for an 3407 /// rvalue reference to the specified type. 3408 QualType ASTContext::getRValueReferenceType(QualType T) const { 3409 // Unique pointers, to guarantee there is only one pointer of a particular 3410 // structure. 
3411 llvm::FoldingSetNodeID ID; 3412 ReferenceType::Profile(ID, T, false); 3413 3414 void *InsertPos = nullptr; 3415 if (RValueReferenceType *RT = 3416 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos)) 3417 return QualType(RT, 0); 3418 3419 const auto *InnerRef = T->getAs<ReferenceType>(); 3420 3421 // If the referencee type isn't canonical, this won't be a canonical type 3422 // either, so fill in the canonical type field. 3423 QualType Canonical; 3424 if (InnerRef || !T.isCanonical()) { 3425 QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T); 3426 Canonical = getRValueReferenceType(getCanonicalType(PointeeType)); 3427 3428 // Get the new insert position for the node we care about. 3429 RValueReferenceType *NewIP = 3430 RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos); 3431 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3432 } 3433 3434 auto *New = new (*this, TypeAlignment) RValueReferenceType(T, Canonical); 3435 Types.push_back(New); 3436 RValueReferenceTypes.InsertNode(New, InsertPos); 3437 return QualType(New, 0); 3438 } 3439 3440 /// getMemberPointerType - Return the uniqued reference to the type for a 3441 /// member pointer to the specified type, in the specified class. 3442 QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const { 3443 // Unique pointers, to guarantee there is only one pointer of a particular 3444 // structure. 3445 llvm::FoldingSetNodeID ID; 3446 MemberPointerType::Profile(ID, T, Cls); 3447 3448 void *InsertPos = nullptr; 3449 if (MemberPointerType *PT = 3450 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos)) 3451 return QualType(PT, 0); 3452 3453 // If the pointee or class type isn't canonical, this won't be a canonical 3454 // type either, so fill in the canonical type field. 3455 QualType Canonical; 3456 if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) { 3457 Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls)); 3458 3459 // Get the new insert position for the node we care about. 3460 MemberPointerType *NewIP = 3461 MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos); 3462 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3463 } 3464 auto *New = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical); 3465 Types.push_back(New); 3466 MemberPointerTypes.InsertNode(New, InsertPos); 3467 return QualType(New, 0); 3468 } 3469 3470 /// getConstantArrayType - Return the unique reference to the type for an 3471 /// array of the specified element type. 3472 QualType ASTContext::getConstantArrayType(QualType EltTy, 3473 const llvm::APInt &ArySizeIn, 3474 const Expr *SizeExpr, 3475 ArrayType::ArraySizeModifier ASM, 3476 unsigned IndexTypeQuals) const { 3477 assert((EltTy->isDependentType() || 3478 EltTy->isIncompleteType() || EltTy->isConstantSizeType()) && 3479 "Constant array of VLAs is illegal!"); 3480 3481 // We only need the size as part of the type if it's instantiation-dependent. 3482 if (SizeExpr && !SizeExpr->isInstantiationDependent()) 3483 SizeExpr = nullptr; 3484 3485 // Convert the array size into a canonical width matching the pointer size for 3486 // the target. 
3487 llvm::APInt ArySize(ArySizeIn); 3488 ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth()); 3489 3490 llvm::FoldingSetNodeID ID; 3491 ConstantArrayType::Profile(ID, *this, EltTy, ArySize, SizeExpr, ASM, 3492 IndexTypeQuals); 3493 3494 void *InsertPos = nullptr; 3495 if (ConstantArrayType *ATP = 3496 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos)) 3497 return QualType(ATP, 0); 3498 3499 // If the element type isn't canonical or has qualifiers, or the array bound 3500 // is instantiation-dependent, this won't be a canonical type either, so fill 3501 // in the canonical type field. 3502 QualType Canon; 3503 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers() || SizeExpr) { 3504 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3505 Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize, nullptr, 3506 ASM, IndexTypeQuals); 3507 Canon = getQualifiedType(Canon, canonSplit.Quals); 3508 3509 // Get the new insert position for the node we care about. 3510 ConstantArrayType *NewIP = 3511 ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos); 3512 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 3513 } 3514 3515 void *Mem = Allocate( 3516 ConstantArrayType::totalSizeToAlloc<const Expr *>(SizeExpr ? 1 : 0), 3517 TypeAlignment); 3518 auto *New = new (Mem) 3519 ConstantArrayType(EltTy, Canon, ArySize, SizeExpr, ASM, IndexTypeQuals); 3520 ConstantArrayTypes.InsertNode(New, InsertPos); 3521 Types.push_back(New); 3522 return QualType(New, 0); 3523 } 3524 3525 /// getVariableArrayDecayedType - Turns the given type, which may be 3526 /// variably-modified, into the corresponding type with all the known 3527 /// sizes replaced with [*]. 3528 QualType ASTContext::getVariableArrayDecayedType(QualType type) const { 3529 // Vastly most common case. 3530 if (!type->isVariablyModifiedType()) return type; 3531 3532 QualType result; 3533 3534 SplitQualType split = type.getSplitDesugaredType(); 3535 const Type *ty = split.Ty; 3536 switch (ty->getTypeClass()) { 3537 #define TYPE(Class, Base) 3538 #define ABSTRACT_TYPE(Class, Base) 3539 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3540 #include "clang/AST/TypeNodes.inc" 3541 llvm_unreachable("didn't desugar past all non-canonical types?"); 3542 3543 // These types should never be variably-modified. 3544 case Type::Builtin: 3545 case Type::Complex: 3546 case Type::Vector: 3547 case Type::DependentVector: 3548 case Type::ExtVector: 3549 case Type::DependentSizedExtVector: 3550 case Type::ConstantMatrix: 3551 case Type::DependentSizedMatrix: 3552 case Type::DependentAddressSpace: 3553 case Type::ObjCObject: 3554 case Type::ObjCInterface: 3555 case Type::ObjCObjectPointer: 3556 case Type::Record: 3557 case Type::Enum: 3558 case Type::UnresolvedUsing: 3559 case Type::TypeOfExpr: 3560 case Type::TypeOf: 3561 case Type::Decltype: 3562 case Type::UnaryTransform: 3563 case Type::DependentName: 3564 case Type::InjectedClassName: 3565 case Type::TemplateSpecialization: 3566 case Type::DependentTemplateSpecialization: 3567 case Type::TemplateTypeParm: 3568 case Type::SubstTemplateTypeParmPack: 3569 case Type::Auto: 3570 case Type::DeducedTemplateSpecialization: 3571 case Type::PackExpansion: 3572 case Type::ExtInt: 3573 case Type::DependentExtInt: 3574 llvm_unreachable("type should never be variably-modified"); 3575 3576 // These types can be variably-modified but should never need to 3577 // further decay. 
3578 case Type::FunctionNoProto: 3579 case Type::FunctionProto: 3580 case Type::BlockPointer: 3581 case Type::MemberPointer: 3582 case Type::Pipe: 3583 return type; 3584 3585 // These types can be variably-modified. All these modifications 3586 // preserve structure except as noted by comments. 3587 // TODO: if we ever care about optimizing VLAs, there are no-op 3588 // optimizations available here. 3589 case Type::Pointer: 3590 result = getPointerType(getVariableArrayDecayedType( 3591 cast<PointerType>(ty)->getPointeeType())); 3592 break; 3593 3594 case Type::LValueReference: { 3595 const auto *lv = cast<LValueReferenceType>(ty); 3596 result = getLValueReferenceType( 3597 getVariableArrayDecayedType(lv->getPointeeType()), 3598 lv->isSpelledAsLValue()); 3599 break; 3600 } 3601 3602 case Type::RValueReference: { 3603 const auto *lv = cast<RValueReferenceType>(ty); 3604 result = getRValueReferenceType( 3605 getVariableArrayDecayedType(lv->getPointeeType())); 3606 break; 3607 } 3608 3609 case Type::Atomic: { 3610 const auto *at = cast<AtomicType>(ty); 3611 result = getAtomicType(getVariableArrayDecayedType(at->getValueType())); 3612 break; 3613 } 3614 3615 case Type::ConstantArray: { 3616 const auto *cat = cast<ConstantArrayType>(ty); 3617 result = getConstantArrayType( 3618 getVariableArrayDecayedType(cat->getElementType()), 3619 cat->getSize(), 3620 cat->getSizeExpr(), 3621 cat->getSizeModifier(), 3622 cat->getIndexTypeCVRQualifiers()); 3623 break; 3624 } 3625 3626 case Type::DependentSizedArray: { 3627 const auto *dat = cast<DependentSizedArrayType>(ty); 3628 result = getDependentSizedArrayType( 3629 getVariableArrayDecayedType(dat->getElementType()), 3630 dat->getSizeExpr(), 3631 dat->getSizeModifier(), 3632 dat->getIndexTypeCVRQualifiers(), 3633 dat->getBracketsRange()); 3634 break; 3635 } 3636 3637 // Turn incomplete types into [*] types. 3638 case Type::IncompleteArray: { 3639 const auto *iat = cast<IncompleteArrayType>(ty); 3640 result = getVariableArrayType( 3641 getVariableArrayDecayedType(iat->getElementType()), 3642 /*size*/ nullptr, 3643 ArrayType::Normal, 3644 iat->getIndexTypeCVRQualifiers(), 3645 SourceRange()); 3646 break; 3647 } 3648 3649 // Turn VLA types into [*] types. 3650 case Type::VariableArray: { 3651 const auto *vat = cast<VariableArrayType>(ty); 3652 result = getVariableArrayType( 3653 getVariableArrayDecayedType(vat->getElementType()), 3654 /*size*/ nullptr, 3655 ArrayType::Star, 3656 vat->getIndexTypeCVRQualifiers(), 3657 vat->getBracketsRange()); 3658 break; 3659 } 3660 } 3661 3662 // Apply the top-level qualifiers from the original. 3663 return getQualifiedType(result, split.Quals); 3664 } 3665 3666 /// getVariableArrayType - Returns a non-unique reference to the type for a 3667 /// variable array of the specified element type. 3668 QualType ASTContext::getVariableArrayType(QualType EltTy, 3669 Expr *NumElts, 3670 ArrayType::ArraySizeModifier ASM, 3671 unsigned IndexTypeQuals, 3672 SourceRange Brackets) const { 3673 // Since we don't unique expressions, it isn't possible to unique VLA's 3674 // that have an expression provided for their size. 3675 QualType Canon; 3676 3677 // Be sure to pull qualifiers off the element type. 
3678 if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) { 3679 SplitQualType canonSplit = getCanonicalType(EltTy).split(); 3680 Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM, 3681 IndexTypeQuals, Brackets); 3682 Canon = getQualifiedType(Canon, canonSplit.Quals); 3683 } 3684 3685 auto *New = new (*this, TypeAlignment) 3686 VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets); 3687 3688 VariableArrayTypes.push_back(New); 3689 Types.push_back(New); 3690 return QualType(New, 0); 3691 } 3692 3693 /// getDependentSizedArrayType - Returns a non-unique reference to 3694 /// the type for a dependently-sized array of the specified element 3695 /// type. 3696 QualType ASTContext::getDependentSizedArrayType(QualType elementType, 3697 Expr *numElements, 3698 ArrayType::ArraySizeModifier ASM, 3699 unsigned elementTypeQuals, 3700 SourceRange brackets) const { 3701 assert((!numElements || numElements->isTypeDependent() || 3702 numElements->isValueDependent()) && 3703 "Size must be type- or value-dependent!"); 3704 3705 // Dependently-sized array types that do not have a specified number 3706 // of elements will have their sizes deduced from a dependent 3707 // initializer. We do no canonicalization here at all, which is okay 3708 // because they can't be used in most locations. 3709 if (!numElements) { 3710 auto *newType 3711 = new (*this, TypeAlignment) 3712 DependentSizedArrayType(*this, elementType, QualType(), 3713 numElements, ASM, elementTypeQuals, 3714 brackets); 3715 Types.push_back(newType); 3716 return QualType(newType, 0); 3717 } 3718 3719 // Otherwise, we actually build a new type every time, but we 3720 // also build a canonical type. 3721 3722 SplitQualType canonElementType = getCanonicalType(elementType).split(); 3723 3724 void *insertPos = nullptr; 3725 llvm::FoldingSetNodeID ID; 3726 DependentSizedArrayType::Profile(ID, *this, 3727 QualType(canonElementType.Ty, 0), 3728 ASM, elementTypeQuals, numElements); 3729 3730 // Look for an existing type with these properties. 3731 DependentSizedArrayType *canonTy = 3732 DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos); 3733 3734 // If we don't have one, build one. 3735 if (!canonTy) { 3736 canonTy = new (*this, TypeAlignment) 3737 DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0), 3738 QualType(), numElements, ASM, elementTypeQuals, 3739 brackets); 3740 DependentSizedArrayTypes.InsertNode(canonTy, insertPos); 3741 Types.push_back(canonTy); 3742 } 3743 3744 // Apply qualifiers from the element type to the array. 3745 QualType canon = getQualifiedType(QualType(canonTy,0), 3746 canonElementType.Quals); 3747 3748 // If we didn't need extra canonicalization for the element type or the size 3749 // expression, then just use that as our result. 3750 if (QualType(canonElementType.Ty, 0) == elementType && 3751 canonTy->getSizeExpr() == numElements) 3752 return canon; 3753 3754 // Otherwise, we need to build a type which follows the spelling 3755 // of the element type. 
  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentSizedArrayType(*this, elementType, canon, numElements,
                                ASM, elementTypeQuals, brackets);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

QualType ASTContext::getIncompleteArrayType(QualType elementType,
                                            ArrayType::ArraySizeModifier ASM,
                                            unsigned elementTypeQuals) const {
  llvm::FoldingSetNodeID ID;
  IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);

  void *insertPos = nullptr;
  if (IncompleteArrayType *iat =
        IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
    return QualType(iat, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field. We also have to pull
  // qualifiers off the element type.
  QualType canon;

  if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
    SplitQualType canonSplit = getCanonicalType(elementType).split();
    canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
                                   ASM, elementTypeQuals);
    canon = getQualifiedType(canon, canonSplit.Quals);

    // Get the new insert position for the node we care about.
    IncompleteArrayType *existing =
      IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
    assert(!existing && "Shouldn't be in the map!"); (void) existing;
  }

  auto *newType = new (*this, TypeAlignment)
    IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);

  IncompleteArrayTypes.InsertNode(newType, insertPos);
  Types.push_back(newType);
  return QualType(newType, 0);
}

ASTContext::BuiltinVectorTypeInfo
ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
#define SVE_INT_ELTTY(BITS, ELTS, SIGNED, NUMVECTORS)                          \
  {getIntTypeForBitwidth(BITS, SIGNED), llvm::ElementCount::getScalable(ELTS), \
   NUMVECTORS};

#define SVE_ELTTY(ELTTY, ELTS, NUMVECTORS)                                     \
  {ELTTY, llvm::ElementCount::getScalable(ELTS), NUMVECTORS};

  switch (Ty->getKind()) {
  default:
    llvm_unreachable("Unsupported builtin vector type");
  case BuiltinType::SveInt8:
    return SVE_INT_ELTTY(8, 16, true, 1);
  case BuiltinType::SveUint8:
    return SVE_INT_ELTTY(8, 16, false, 1);
  case BuiltinType::SveInt8x2:
    return SVE_INT_ELTTY(8, 16, true, 2);
  case BuiltinType::SveUint8x2:
    return SVE_INT_ELTTY(8, 16, false, 2);
  case BuiltinType::SveInt8x3:
    return SVE_INT_ELTTY(8, 16, true, 3);
  case BuiltinType::SveUint8x3:
    return SVE_INT_ELTTY(8, 16, false, 3);
  case BuiltinType::SveInt8x4:
    return SVE_INT_ELTTY(8, 16, true, 4);
  case BuiltinType::SveUint8x4:
    return SVE_INT_ELTTY(8, 16, false, 4);
  case BuiltinType::SveInt16:
    return SVE_INT_ELTTY(16, 8, true, 1);
  case BuiltinType::SveUint16:
    return SVE_INT_ELTTY(16, 8, false, 1);
  case BuiltinType::SveInt16x2:
    return SVE_INT_ELTTY(16, 8, true, 2);
  case BuiltinType::SveUint16x2:
    return SVE_INT_ELTTY(16, 8, false, 2);
  case BuiltinType::SveInt16x3:
    return SVE_INT_ELTTY(16, 8, true, 3);
  case BuiltinType::SveUint16x3:
    return SVE_INT_ELTTY(16, 8, false, 3);
  case BuiltinType::SveInt16x4:
    return SVE_INT_ELTTY(16, 8, true, 4);
  case BuiltinType::SveUint16x4:
    return SVE_INT_ELTTY(16, 8, false, 4);
  case BuiltinType::SveInt32:
    return SVE_INT_ELTTY(32, 4, true, 1);
  case BuiltinType::SveUint32:
    return SVE_INT_ELTTY(32, 4, false, 1);
  case BuiltinType::SveInt32x2:
    return SVE_INT_ELTTY(32, 4, true, 2);
  case BuiltinType::SveUint32x2:
    return SVE_INT_ELTTY(32, 4, false, 2);
  case BuiltinType::SveInt32x3:
    return SVE_INT_ELTTY(32, 4, true, 3);
  case BuiltinType::SveUint32x3:
    return SVE_INT_ELTTY(32, 4, false, 3);
  case BuiltinType::SveInt32x4:
    return SVE_INT_ELTTY(32, 4, true, 4);
  case BuiltinType::SveUint32x4:
    return SVE_INT_ELTTY(32, 4, false, 4);
  case BuiltinType::SveInt64:
    return SVE_INT_ELTTY(64, 2, true, 1);
  case BuiltinType::SveUint64:
    return SVE_INT_ELTTY(64, 2, false, 1);
  case BuiltinType::SveInt64x2:
    return SVE_INT_ELTTY(64, 2, true, 2);
  case BuiltinType::SveUint64x2:
    return SVE_INT_ELTTY(64, 2, false, 2);
  case BuiltinType::SveInt64x3:
    return SVE_INT_ELTTY(64, 2, true, 3);
  case BuiltinType::SveUint64x3:
    return SVE_INT_ELTTY(64, 2, false, 3);
  case BuiltinType::SveInt64x4:
    return SVE_INT_ELTTY(64, 2, true, 4);
  case BuiltinType::SveUint64x4:
    return SVE_INT_ELTTY(64, 2, false, 4);
  case BuiltinType::SveBool:
    return SVE_ELTTY(BoolTy, 16, 1);
  case BuiltinType::SveFloat16:
    return SVE_ELTTY(HalfTy, 8, 1);
  case BuiltinType::SveFloat16x2:
    return SVE_ELTTY(HalfTy, 8, 2);
  case BuiltinType::SveFloat16x3:
    return SVE_ELTTY(HalfTy, 8, 3);
  case BuiltinType::SveFloat16x4:
    return SVE_ELTTY(HalfTy, 8, 4);
  case BuiltinType::SveFloat32:
    return SVE_ELTTY(FloatTy, 4, 1);
  case BuiltinType::SveFloat32x2:
    return SVE_ELTTY(FloatTy, 4, 2);
  case BuiltinType::SveFloat32x3:
    return SVE_ELTTY(FloatTy, 4, 3);
  case BuiltinType::SveFloat32x4:
    return SVE_ELTTY(FloatTy, 4, 4);
  case BuiltinType::SveFloat64:
    return SVE_ELTTY(DoubleTy, 2, 1);
  case BuiltinType::SveFloat64x2:
    return SVE_ELTTY(DoubleTy, 2, 2);
  case BuiltinType::SveFloat64x3:
    return SVE_ELTTY(DoubleTy, 2, 3);
  case BuiltinType::SveFloat64x4:
    return SVE_ELTTY(DoubleTy, 2, 4);
  case BuiltinType::SveBFloat16:
    return SVE_ELTTY(BFloat16Ty, 8, 1);
  case BuiltinType::SveBFloat16x2:
    return SVE_ELTTY(BFloat16Ty, 8, 2);
  case BuiltinType::SveBFloat16x3:
    return SVE_ELTTY(BFloat16Ty, 8, 3);
  case BuiltinType::SveBFloat16x4:
    return SVE_ELTTY(BFloat16Ty, 8, 4);
#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF,        \
                            IsSigned)                                         \
  case BuiltinType::Id:                                                       \
    return {getIntTypeForBitwidth(ElBits, IsSigned),                          \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF)      \
  case BuiltinType::Id:                                                       \
    return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy),   \
            llvm::ElementCount::getScalable(NumEls), NF};
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                     \
  case BuiltinType::Id:                                                       \
    return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
#include "clang/Basic/RISCVVTypes.def"
  }
}

/// getScalableVectorType - Return the unique reference to a scalable vector
/// type of the specified element type and size. VectorType must be a built-in
/// type.
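///
/// Illustrative sketch (not part of the original file; assumes an ASTContext
/// &Ctx whose target has the AArch64 SVE types enabled):
/// \code
///   // Ask for a scalable vector of 4 x float; this is expected to resolve
///   // to the SveFloat32 builtin singleton rather than a newly built node.
///   QualType VecTy = Ctx.getScalableVectorType(Ctx.FloatTy, 4);
///   assert(!VecTy.isNull() && VecTy->isSizelessBuiltinType());
/// \endcode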
QualType ASTContext::getScalableVectorType(QualType EltTy,
                                           unsigned NumElts) const {
  if (Target->hasAArch64SVETypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,   \
                        IsSigned, IsFP, IsBF)                                 \
  if (!EltTy->isBooleanType() &&                                              \
      ((EltTy->hasIntegerRepresentation() &&                                  \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||               \
       (EltTy->hasFloatingRepresentation() && !EltTy->isBFloat16Type() &&     \
        IsFP && !IsBF) ||                                                     \
       (EltTy->hasFloatingRepresentation() && EltTy->isBFloat16Type() &&      \
        IsBF && !IsFP)) &&                                                    \
      EltTySize == ElBits && NumElts == NumEls) {                             \
    return SingletonId;                                                       \
  }
#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)        \
  if (EltTy->isBooleanType() && NumElts == NumEls)                            \
    return SingletonId;
#include "clang/Basic/AArch64SVEACLETypes.def"
  } else if (Target->hasRISCVVTypes()) {
    uint64_t EltTySize = getTypeSize(EltTy);
#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,  \
                        IsFP)                                                 \
  if (!EltTy->isBooleanType() &&                                              \
      ((EltTy->hasIntegerRepresentation() &&                                  \
        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||               \
       (EltTy->hasFloatingRepresentation() && IsFP)) &&                       \
      EltTySize == ElBits && NumElts == NumEls)                               \
    return SingletonId;
#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                     \
  if (EltTy->isBooleanType() && NumElts == NumEls)                            \
    return SingletonId;
#include "clang/Basic/RISCVVTypes.def"
  }
  return QualType();
}

/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
                                   VectorType::VectorKind VecKind) const {
  assert(vecType->isBuiltinType());

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);

  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);

    // Get the new insert position for the node we care about.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment)
    VectorType(vecType, NumElts, Canonical, VecKind);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType
ASTContext::getDependentVectorType(QualType VecType, Expr *SizeExpr,
                                   SourceLocation AttrLoc,
                                   VectorType::VectorKind VecKind) const {
  llvm::FoldingSetNodeID ID;
  DependentVectorType::Profile(ID, *this, getCanonicalType(VecType), SizeExpr,
                               VecKind);
  void *InsertPos = nullptr;
  DependentVectorType *Canon =
      DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentVectorType *New;

  if (Canon) {
    New = new (*this, TypeAlignment) DependentVectorType(
        *this, VecType, QualType(Canon, 0), SizeExpr, AttrLoc, VecKind);
  } else {
    QualType CanonVecTy = getCanonicalType(VecType);
    if (CanonVecTy == VecType) {
      New = new (*this, TypeAlignment) DependentVectorType(
          *this, VecType, QualType(), SizeExpr, AttrLoc, VecKind);

      DependentVectorType *CanonCheck =
          DependentVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck &&
             "Dependent-sized vector_size canonical type broken");
      (void)CanonCheck;
      DependentVectorTypes.InsertNode(New, InsertPos);
    } else {
      QualType CanonTy = getDependentVectorType(CanonVecTy, SizeExpr,
                                                SourceLocation(), VecKind);
      New = new (*this, TypeAlignment) DependentVectorType(
          *this, VecType, CanonTy, SizeExpr, AttrLoc, VecKind);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}

/// getExtVectorType - Return the unique reference to an extended vector type
/// of the specified element type and size. VectorType must be a built-in type.
QualType
ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
  assert(vecType->isBuiltinType() || vecType->isDependentType());

  // Check if we've already instantiated a vector of this type.
  llvm::FoldingSetNodeID ID;
  VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
                      VectorType::GenericVector);
  void *InsertPos = nullptr;
  if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(VTP, 0);

  // If the element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!vecType.isCanonical()) {
    Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);

    // Get the new insert position for the node we care about.
    VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment)
    ExtVectorType(vecType, NumElts, Canonical);
  VectorTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType
ASTContext::getDependentSizedExtVectorType(QualType vecType,
                                           Expr *SizeExpr,
                                           SourceLocation AttrLoc) const {
  llvm::FoldingSetNodeID ID;
  DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
                                       SizeExpr);

  void *InsertPos = nullptr;
  DependentSizedExtVectorType *Canon
    = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
  DependentSizedExtVectorType *New;
  if (Canon) {
    // We already have a canonical version of this array type; use it as
    // the canonical type for a newly-built type.
    New = new (*this, TypeAlignment)
      DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
                                  SizeExpr, AttrLoc);
  } else {
    QualType CanonVecTy = getCanonicalType(vecType);
    if (CanonVecTy == vecType) {
      New = new (*this, TypeAlignment)
        DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
                                    AttrLoc);

      DependentSizedExtVectorType *CanonCheck
        = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
      assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
      (void)CanonCheck;
      DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
    } else {
      QualType CanonExtTy = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
                                                           SourceLocation());
      New = new (*this, TypeAlignment) DependentSizedExtVectorType(
          *this, vecType, CanonExtTy, SizeExpr, AttrLoc);
    }
  }

  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getConstantMatrixType(QualType ElementTy, unsigned NumRows,
                                           unsigned NumColumns) const {
  llvm::FoldingSetNodeID ID;
  ConstantMatrixType::Profile(ID, ElementTy, NumRows, NumColumns,
                              Type::ConstantMatrix);

  assert(MatrixType::isValidElementType(ElementTy) &&
         "need a valid element type");
  assert(ConstantMatrixType::isDimensionValid(NumRows) &&
         ConstantMatrixType::isDimensionValid(NumColumns) &&
         "need valid matrix dimensions");
  void *InsertPos = nullptr;
  if (ConstantMatrixType *MTP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(MTP, 0);

  QualType Canonical;
  if (!ElementTy.isCanonical()) {
    Canonical =
        getConstantMatrixType(getCanonicalType(ElementTy), NumRows, NumColumns);

    ConstantMatrixType *NewIP = MatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Matrix type shouldn't already exist in the map");
    (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment)
      ConstantMatrixType(ElementTy, NumRows, NumColumns, Canonical);
  MatrixTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentSizedMatrixType(QualType ElementTy,
                                                 Expr *RowExpr,
                                                 Expr *ColumnExpr,
                                                 SourceLocation AttrLoc) const {
  QualType CanonElementTy = getCanonicalType(ElementTy);
  llvm::FoldingSetNodeID ID;
  DependentSizedMatrixType::Profile(ID, *this, CanonElementTy, RowExpr,
                                    ColumnExpr);

  void *InsertPos = nullptr;
  DependentSizedMatrixType
      *Canon = DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);

  if (!Canon) {
    Canon = new (*this, TypeAlignment) DependentSizedMatrixType(
        *this, CanonElementTy, QualType(), RowExpr, ColumnExpr, AttrLoc);
#ifndef NDEBUG
    DependentSizedMatrixType *CanonCheck =
        DependentSizedMatrixTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!CanonCheck && "Dependent-sized matrix canonical type broken");
#endif
    DependentSizedMatrixTypes.InsertNode(Canon, InsertPos);
    Types.push_back(Canon);
  }

  // Already have a canonical version of the matrix type
  //
  // If it exactly matches the requested type, use it directly.
  if (Canon->getElementType() == ElementTy && Canon->getRowExpr() == RowExpr &&
      Canon->getColumnExpr() == ColumnExpr)
    return QualType(Canon, 0);

  // Use Canon as the canonical type for newly-built type.
  DependentSizedMatrixType *New = new (*this, TypeAlignment)
      DependentSizedMatrixType(*this, ElementTy, QualType(Canon, 0), RowExpr,
                               ColumnExpr, AttrLoc);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentAddressSpaceType(QualType PointeeType,
                                                  Expr *AddrSpaceExpr,
                                                  SourceLocation AttrLoc) const {
  assert(AddrSpaceExpr->isInstantiationDependent());

  QualType canonPointeeType = getCanonicalType(PointeeType);

  void *insertPos = nullptr;
  llvm::FoldingSetNodeID ID;
  DependentAddressSpaceType::Profile(ID, *this, canonPointeeType,
                                     AddrSpaceExpr);

  DependentAddressSpaceType *canonTy =
    DependentAddressSpaceTypes.FindNodeOrInsertPos(ID, insertPos);

  if (!canonTy) {
    canonTy = new (*this, TypeAlignment)
      DependentAddressSpaceType(*this, canonPointeeType,
                                QualType(), AddrSpaceExpr, AttrLoc);
    DependentAddressSpaceTypes.InsertNode(canonTy, insertPos);
    Types.push_back(canonTy);
  }

  if (canonPointeeType == PointeeType &&
      canonTy->getAddrSpaceExpr() == AddrSpaceExpr)
    return QualType(canonTy, 0);

  auto *sugaredType
    = new (*this, TypeAlignment)
        DependentAddressSpaceType(*this, PointeeType, QualType(canonTy, 0),
                                  AddrSpaceExpr, AttrLoc);
  Types.push_back(sugaredType);
  return QualType(sugaredType, 0);
}

/// Determine whether \p T is canonical as the result type of a function.
static bool isCanonicalResultType(QualType T) {
  return T.isCanonical() &&
         (T.getObjCLifetime() == Qualifiers::OCL_None ||
          T.getObjCLifetime() == Qualifiers::OCL_ExplicitNone);
}

/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
                                   const FunctionType::ExtInfo &Info) const {
  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionNoProtoType::Profile(ID, ResultTy, Info);

  void *InsertPos = nullptr;
  if (FunctionNoProtoType *FT =
        FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(FT, 0);

  QualType Canonical;
  if (!isCanonicalResultType(ResultTy)) {
    Canonical =
      getFunctionNoProtoType(getCanonicalFunctionResultType(ResultTy), Info);

    // Get the new insert position for the node we care about.
    FunctionNoProtoType *NewIP =
      FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  auto *New = new (*this, TypeAlignment)
    FunctionNoProtoType(ResultTy, Canonical, Info);
  Types.push_back(New);
  FunctionNoProtoTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

CanQualType
ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
  CanQualType CanResultType = getCanonicalType(ResultType);

  // Canonical result types do not have ARC lifetime qualifiers.
  if (CanResultType.getQualifiers().hasObjCLifetime()) {
    Qualifiers Qs = CanResultType.getQualifiers();
    Qs.removeObjCLifetime();
    return CanQualType::CreateUnsafe(
             getQualifiedType(CanResultType.getUnqualifiedType(), Qs));
  }

  return CanResultType;
}

static bool isCanonicalExceptionSpecification(
    const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
  if (ESI.Type == EST_None)
    return true;
  if (!NoexceptInType)
    return false;

  // C++17 onwards: exception specification is part of the type, as a simple
  // boolean "can this function type throw".
  if (ESI.Type == EST_BasicNoexcept)
    return true;

  // A noexcept(expr) specification is (possibly) canonical if expr is
  // value-dependent.
  if (ESI.Type == EST_DependentNoexcept)
    return true;

  // A dynamic exception specification is canonical if it only contains pack
  // expansions (so we can't tell whether it's non-throwing) and all its
  // contained types are canonical.
  if (ESI.Type == EST_Dynamic) {
    bool AnyPackExpansions = false;
    for (QualType ET : ESI.Exceptions) {
      if (!ET.isCanonical())
        return false;
      if (ET->getAs<PackExpansionType>())
        AnyPackExpansions = true;
    }
    return AnyPackExpansions;
  }

  return false;
}

QualType ASTContext::getFunctionTypeInternal(
    QualType ResultTy, ArrayRef<QualType> ArgArray,
    const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
  size_t NumArgs = ArgArray.size();

  // Unique functions, to guarantee there is only one function of a particular
  // structure.
  llvm::FoldingSetNodeID ID;
  FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
                             *this, true);

  QualType Canonical;
  bool Unique = false;

  void *InsertPos = nullptr;
  if (FunctionProtoType *FPT =
        FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
    QualType Existing = QualType(FPT, 0);

    // If we find a pre-existing equivalent FunctionProtoType, we can just
    // reuse it so long as our exception specification doesn't contain a
    // dependent noexcept expression, or we're just looking for a canonical
    // type. Otherwise, we're going to need to create a type sugar node to
    // hold the concrete expression.
    if (OnlyWantCanonical || !isComputedNoexcept(EPI.ExceptionSpec.Type) ||
        EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
      return Existing;

    // We need a new type sugar node for this one, to hold the new noexcept
    // expression. We do no canonicalization here, but that's OK since we don't
    // expect to see the same noexcept expression much more than once.
    Canonical = getCanonicalType(Existing);
    Unique = true;
  }

  bool NoexceptInType = getLangOpts().CPlusPlus17;
  bool IsCanonicalExceptionSpec =
      isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);

  // Determine whether the type being created is already canonical or not.
  bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
                     isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
  for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
    if (!ArgArray[i].isCanonicalAsParam())
      isCanonical = false;

  if (OnlyWantCanonical)
    assert(isCanonical &&
           "given non-canonical parameters constructing canonical type");

  // If this type isn't canonical, get the canonical version of it if we don't
  // already have it. The exception spec is only partially part of the
  // canonical type, and only in C++17 onwards.
  if (!isCanonical && Canonical.isNull()) {
    SmallVector<QualType, 16> CanonicalArgs;
    CanonicalArgs.reserve(NumArgs);
    for (unsigned i = 0; i != NumArgs; ++i)
      CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));

    llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
    FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
    CanonicalEPI.HasTrailingReturn = false;

    if (IsCanonicalExceptionSpec) {
      // Exception spec is already OK.
    } else if (NoexceptInType) {
      switch (EPI.ExceptionSpec.Type) {
      case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
        // We don't know yet. It shouldn't matter what we pick here; no-one
        // should ever look at this.
        LLVM_FALLTHROUGH;
      case EST_None: case EST_MSAny: case EST_NoexceptFalse:
        CanonicalEPI.ExceptionSpec.Type = EST_None;
        break;

      // A dynamic exception specification is almost always "not noexcept",
      // with the exception that a pack expansion might expand to no types.
      case EST_Dynamic: {
        bool AnyPacks = false;
        for (QualType ET : EPI.ExceptionSpec.Exceptions) {
          if (ET->getAs<PackExpansionType>())
            AnyPacks = true;
          ExceptionTypeStorage.push_back(getCanonicalType(ET));
        }
        if (!AnyPacks)
          CanonicalEPI.ExceptionSpec.Type = EST_None;
        else {
          CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
          CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
        }
        break;
      }

      case EST_DynamicNone:
      case EST_BasicNoexcept:
      case EST_NoexceptTrue:
      case EST_NoThrow:
        CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
        break;

      case EST_DependentNoexcept:
        llvm_unreachable("dependent noexcept is already canonical");
      }
    } else {
      CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
    }

    // Adjust the canonical function result type.
    CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
    Canonical =
        getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);

    // Get the new insert position for the node we care about.
    FunctionProtoType *NewIP =
      FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP;
  }

  // Compute the needed size to hold this FunctionProtoType and the
  // various trailing objects.
  auto ESH = FunctionProtoType::getExceptionSpecSize(
      EPI.ExceptionSpec.Type, EPI.ExceptionSpec.Exceptions.size());
  size_t Size = FunctionProtoType::totalSizeToAlloc<
      QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
      FunctionType::ExceptionType, Expr *, FunctionDecl *,
      FunctionProtoType::ExtParameterInfo, Qualifiers>(
      NumArgs, EPI.Variadic,
      FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
      ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
      EPI.ExtParameterInfos ? NumArgs : 0,
      EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);

  auto *FTP = (FunctionProtoType *)Allocate(Size, TypeAlignment);
  FunctionProtoType::ExtProtoInfo newEPI = EPI;
  new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
  Types.push_back(FTP);
  if (!Unique)
    FunctionProtoTypes.InsertNode(FTP, InsertPos);
  return QualType(FTP, 0);
}

QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
  llvm::FoldingSetNodeID ID;
  PipeType::Profile(ID, T, ReadOnly);

  void *InsertPos = nullptr;
  if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(PT, 0);

  // If the pipe element type isn't canonical, this won't be a canonical type
  // either, so fill in the canonical type field.
  QualType Canonical;
  if (!T.isCanonical()) {
    Canonical = getPipeType(getCanonicalType(T), ReadOnly);

    // Get the new insert position for the node we care about.
    PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
    assert(!NewIP && "Shouldn't be in the map!");
    (void)NewIP;
  }
  auto *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
  Types.push_back(New);
  PipeTypes.InsertNode(New, InsertPos);
  return QualType(New, 0);
}

QualType ASTContext::adjustStringLiteralBaseType(QualType Ty) const {
  // OpenCL v1.1 s6.5.3: a string literal is in the constant address space.
  return LangOpts.OpenCL ?
      getAddrSpaceQualType(Ty, LangAS::opencl_constant) : Ty;
}

QualType ASTContext::getReadPipeType(QualType T) const {
  return getPipeType(T, true);
}

QualType ASTContext::getWritePipeType(QualType T) const {
  return getPipeType(T, false);
}

QualType ASTContext::getExtIntType(bool IsUnsigned, unsigned NumBits) const {
  llvm::FoldingSetNodeID ID;
  ExtIntType::Profile(ID, IsUnsigned, NumBits);

  void *InsertPos = nullptr;
  if (ExtIntType *EIT = ExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(EIT, 0);

  auto *New = new (*this, TypeAlignment) ExtIntType(IsUnsigned, NumBits);
  ExtIntTypes.InsertNode(New, InsertPos);
  Types.push_back(New);
  return QualType(New, 0);
}

QualType ASTContext::getDependentExtIntType(bool IsUnsigned,
                                            Expr *NumBitsExpr) const {
  assert(NumBitsExpr->isInstantiationDependent() && "Only good for dependent");
  llvm::FoldingSetNodeID ID;
  DependentExtIntType::Profile(ID, *this, IsUnsigned, NumBitsExpr);

  void *InsertPos = nullptr;
  if (DependentExtIntType *Existing =
          DependentExtIntTypes.FindNodeOrInsertPos(ID, InsertPos))
    return QualType(Existing, 0);

  auto *New = new (*this, TypeAlignment)
      DependentExtIntType(*this, IsUnsigned, NumBitsExpr);
  DependentExtIntTypes.InsertNode(New, InsertPos);

  Types.push_back(New);
  return QualType(New, 0);
}

#ifndef NDEBUG
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
  if (!isa<CXXRecordDecl>(D)) return false;
  const auto *RD = cast<CXXRecordDecl>(D);
  if (isa<ClassTemplatePartialSpecializationDecl>(RD))
    return true;
  if (RD->getDescribedClassTemplate() &&
      !isa<ClassTemplateSpecializationDecl>(RD))
    return true;
  return false;
}
#endif

/// getInjectedClassNameType - Return the unique reference to the
/// injected class name type for the specified templated declaration.
QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
                                              QualType TST) const {
  assert(NeedsInjectedClassNameType(Decl));
  if (Decl->TypeForDecl) {
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
    assert(PrevDecl->TypeForDecl && "previous declaration has no type");
    Decl->TypeForDecl = PrevDecl->TypeForDecl;
    assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
  } else {
    Type *newType =
      new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  }
  return QualType(Decl->TypeForDecl, 0);
}

/// getTypeDeclType - Return the unique reference to the type for the
/// specified type declaration.
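///
/// Illustrative sketch (not part of the original file; assumes an ASTContext
/// &Ctx and an already-created RecordDecl *RD):
/// \code
///   // The first call creates the RecordType node; later calls return the
///   // cached node stored on the declaration, so the results compare equal.
///   QualType T1 = Ctx.getTypeDeclType(RD);
///   QualType T2 = Ctx.getTypeDeclType(RD);
///   assert(T1 == T2);
/// \endcode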
QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
  assert(Decl && "Passed null for Decl param");
  assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");

  if (const auto *Typedef = dyn_cast<TypedefNameDecl>(Decl))
    return getTypedefType(Typedef);

  assert(!isa<TemplateTypeParmDecl>(Decl) &&
         "Template type parameter types are always available.");

  if (const auto *Record = dyn_cast<RecordDecl>(Decl)) {
    assert(Record->isFirstDecl() && "struct/union has previous declaration");
    assert(!NeedsInjectedClassNameType(Record));
    return getRecordType(Record);
  } else if (const auto *Enum = dyn_cast<EnumDecl>(Decl)) {
    assert(Enum->isFirstDecl() && "enum has previous declaration");
    return getEnumType(Enum);
  } else if (const auto *Using = dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
    Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
    Decl->TypeForDecl = newType;
    Types.push_back(newType);
  } else
    llvm_unreachable("TypeDecl without a type?");

  return QualType(Decl->TypeForDecl, 0);
}

/// getTypedefType - Return the unique reference to the type for the
/// specified typedef name decl.
QualType ASTContext::getTypedefType(const TypedefNameDecl *Decl,
                                    QualType Underlying) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (Underlying.isNull())
    Underlying = Decl->getUnderlyingType();
  QualType Canonical = getCanonicalType(Underlying);
  auto *newType = new (*this, TypeAlignment)
      TypedefType(Type::Typedef, Decl, Underlying, Canonical);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, TypeAlignment) RecordType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
  if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);

  if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
    if (PrevDecl->TypeForDecl)
      return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);

  auto *newType = new (*this, TypeAlignment) EnumType(Decl);
  Decl->TypeForDecl = newType;
  Types.push_back(newType);
  return QualType(newType, 0);
}

QualType ASTContext::getAttributedType(attr::Kind attrKind,
                                       QualType modifiedType,
                                       QualType equivalentType) {
  llvm::FoldingSetNodeID id;
  AttributedType::Profile(id, attrKind, modifiedType, equivalentType);

  void *insertPos = nullptr;
  AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
  if (type) return QualType(type, 0);

  QualType canon = getCanonicalType(equivalentType);
  type = new (*this, TypeAlignment)
      AttributedType(canon, attrKind, modifiedType, equivalentType);

  Types.push_back(type);
  AttributedTypes.InsertNode(type, insertPos);

  return QualType(type, 0);
}

/// Retrieve a substitution-result type.
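///
/// Illustrative sketch (not part of the original file; assumes an ASTContext
/// &Ctx and a canonical TemplateTypeParmType *Parm):
/// \code
///   // Build the sugar node recording that Parm was replaced by 'int'; the
///   // node canonicalizes to the replacement type itself.
///   QualType Sub = Ctx.getSubstTemplateTypeParmType(Parm, Ctx.IntTy);
///   assert(Ctx.hasSameType(Sub, Ctx.IntTy));
/// \endcode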
4643 QualType
4644 ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
4645 QualType Replacement) const {
4646 assert(Replacement.isCanonical()
4647 && "replacement types must always be canonical");
4648
4649 llvm::FoldingSetNodeID ID;
4650 SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
4651 void *InsertPos = nullptr;
4652 SubstTemplateTypeParmType *SubstParm
4653 = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4654
4655 if (!SubstParm) {
4656 SubstParm = new (*this, TypeAlignment)
4657 SubstTemplateTypeParmType(Parm, Replacement);
4658 Types.push_back(SubstParm);
4659 SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
4660 }
4661
4662 return QualType(SubstParm, 0);
4663 }
4664
4665 /// Retrieve a substitution-result type for a template type parameter pack.
4666 QualType ASTContext::getSubstTemplateTypeParmPackType(
4667 const TemplateTypeParmType *Parm,
4668 const TemplateArgument &ArgPack) {
4669 #ifndef NDEBUG
4670 for (const auto &P : ArgPack.pack_elements()) {
4671 assert(P.getKind() == TemplateArgument::Type && "Pack contains a non-type");
4672 assert(P.getAsType().isCanonical() && "Pack contains non-canonical type");
4673 }
4674 #endif
4675
4676 llvm::FoldingSetNodeID ID;
4677 SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
4678 void *InsertPos = nullptr;
4679 if (SubstTemplateTypeParmPackType *SubstParm
4680 = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
4681 return QualType(SubstParm, 0);
4682
4683 QualType Canon;
4684 if (!Parm->isCanonicalUnqualified()) {
4685 Canon = getCanonicalType(QualType(Parm, 0));
4686 Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
4687 ArgPack);
4688 SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
4689 }
4690
4691 auto *SubstParm
4692 = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
4693 ArgPack);
4694 Types.push_back(SubstParm);
4695 SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
4696 return QualType(SubstParm, 0);
4697 }
4698
4699 /// Retrieve the template type parameter type for a template
4700 /// parameter or parameter pack with the given depth, index, and (optionally)
4701 /// name.
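/// For example, in 'template <typename T> struct A { template <typename U> ... };'
/// the outer parameter 'T' sits at (depth 0, index 0) and the inner 'U' at
/// (depth 1, index 0). An illustrative sketch, with a hypothetical 'Ctx'
/// variable:
/// \code
///   QualType T0 = Ctx.getTemplateTypeParmType(/*Depth=*/0, /*Index=*/0,
///                                             /*ParameterPack=*/false);
/// \endcode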
4702 QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
4703 bool ParameterPack,
4704 TemplateTypeParmDecl *TTPDecl) const {
4705 llvm::FoldingSetNodeID ID;
4706 TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
4707 void *InsertPos = nullptr;
4708 TemplateTypeParmType *TypeParm
4709 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4710
4711 if (TypeParm)
4712 return QualType(TypeParm, 0);
4713
4714 if (TTPDecl) {
4715 QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
4716 TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
4717
4718 TemplateTypeParmType *TypeCheck
4719 = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
4720 assert(!TypeCheck && "Template type parameter canonical type broken");
4721 (void)TypeCheck;
4722 } else
4723 TypeParm = new (*this, TypeAlignment)
4724 TemplateTypeParmType(Depth, Index, ParameterPack);
4725
4726 Types.push_back(TypeParm);
4727 TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
4728
4729 return QualType(TypeParm, 0);
4730 }
4731
4732 TypeSourceInfo *
4733 ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
4734 SourceLocation NameLoc,
4735 const TemplateArgumentListInfo &Args,
4736 QualType Underlying) const {
4737 assert(!Name.getAsDependentTemplateName() &&
4738 "No dependent template names here!");
4739 QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
4740
4741 TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
4742 TemplateSpecializationTypeLoc TL =
4743 DI->getTypeLoc().castAs<TemplateSpecializationTypeLoc>();
4744 TL.setTemplateKeywordLoc(SourceLocation());
4745 TL.setTemplateNameLoc(NameLoc);
4746 TL.setLAngleLoc(Args.getLAngleLoc());
4747 TL.setRAngleLoc(Args.getRAngleLoc());
4748 for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
4749 TL.setArgLocInfo(i, Args[i].getLocInfo());
4750 return DI;
4751 }
4752
4753 QualType
4754 ASTContext::getTemplateSpecializationType(TemplateName Template,
4755 const TemplateArgumentListInfo &Args,
4756 QualType Underlying) const {
4757 assert(!Template.getAsDependentTemplateName() &&
4758 "No dependent template names here!");
4759
4760 SmallVector<TemplateArgument, 4> ArgVec;
4761 ArgVec.reserve(Args.size());
4762 for (const TemplateArgumentLoc &Arg : Args.arguments())
4763 ArgVec.push_back(Arg.getArgument());
4764
4765 return getTemplateSpecializationType(Template, ArgVec, Underlying);
4766 }
4767
4768 #ifndef NDEBUG
4769 static bool hasAnyPackExpansions(ArrayRef<TemplateArgument> Args) {
4770 for (const TemplateArgument &Arg : Args)
4771 if (Arg.isPackExpansion())
4772 return true;
4773
4774 return false;
4775 }
4776 #endif
4777
4778 QualType
4779 ASTContext::getTemplateSpecializationType(TemplateName Template,
4780 ArrayRef<TemplateArgument> Args,
4781 QualType Underlying) const {
4782 assert(!Template.getAsDependentTemplateName() &&
4783 "No dependent template names here!");
4784 // Look through qualified template names.
4785 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
4786 Template = TemplateName(QTN->getTemplateDecl());
4787
4788 bool IsTypeAlias =
4789 Template.getAsTemplateDecl() &&
4790 isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
4791 QualType CanonType;
4792 if (!Underlying.isNull())
4793 CanonType = getCanonicalType(Underlying);
4794 else {
4795 // We can get here with an alias template when the specialization contains
4796 // a pack expansion that does not match up with a parameter pack.
4797 assert((!IsTypeAlias || hasAnyPackExpansions(Args)) && 4798 "Caller must compute aliased type"); 4799 IsTypeAlias = false; 4800 CanonType = getCanonicalTemplateSpecializationType(Template, Args); 4801 } 4802 4803 // Allocate the (non-canonical) template specialization type, but don't 4804 // try to unique it: these types typically have location information that 4805 // we don't unique and don't want to lose. 4806 void *Mem = Allocate(sizeof(TemplateSpecializationType) + 4807 sizeof(TemplateArgument) * Args.size() + 4808 (IsTypeAlias? sizeof(QualType) : 0), 4809 TypeAlignment); 4810 auto *Spec 4811 = new (Mem) TemplateSpecializationType(Template, Args, CanonType, 4812 IsTypeAlias ? Underlying : QualType()); 4813 4814 Types.push_back(Spec); 4815 return QualType(Spec, 0); 4816 } 4817 4818 static bool 4819 getCanonicalTemplateArguments(const ASTContext &C, 4820 ArrayRef<TemplateArgument> OrigArgs, 4821 SmallVectorImpl<TemplateArgument> &CanonArgs) { 4822 bool AnyNonCanonArgs = false; 4823 unsigned NumArgs = OrigArgs.size(); 4824 CanonArgs.resize(NumArgs); 4825 for (unsigned I = 0; I != NumArgs; ++I) { 4826 const TemplateArgument &OrigArg = OrigArgs[I]; 4827 TemplateArgument &CanonArg = CanonArgs[I]; 4828 CanonArg = C.getCanonicalTemplateArgument(OrigArg); 4829 if (!CanonArg.structurallyEquals(OrigArg)) 4830 AnyNonCanonArgs = true; 4831 } 4832 return AnyNonCanonArgs; 4833 } 4834 4835 QualType ASTContext::getCanonicalTemplateSpecializationType( 4836 TemplateName Template, ArrayRef<TemplateArgument> Args) const { 4837 assert(!Template.getAsDependentTemplateName() && 4838 "No dependent template names here!"); 4839 4840 // Look through qualified template names. 4841 if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) 4842 Template = TemplateName(QTN->getTemplateDecl()); 4843 4844 // Build the canonical template specialization type. 4845 TemplateName CanonTemplate = getCanonicalTemplateName(Template); 4846 SmallVector<TemplateArgument, 4> CanonArgs; 4847 ::getCanonicalTemplateArguments(*this, Args, CanonArgs); 4848 4849 // Determine whether this canonical template specialization type already 4850 // exists. 4851 llvm::FoldingSetNodeID ID; 4852 TemplateSpecializationType::Profile(ID, CanonTemplate, 4853 CanonArgs, *this); 4854 4855 void *InsertPos = nullptr; 4856 TemplateSpecializationType *Spec 4857 = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4858 4859 if (!Spec) { 4860 // Allocate a new canonical template specialization type. 
4861 void *Mem = Allocate((sizeof(TemplateSpecializationType) + 4862 sizeof(TemplateArgument) * CanonArgs.size()), 4863 TypeAlignment); 4864 Spec = new (Mem) TemplateSpecializationType(CanonTemplate, 4865 CanonArgs, 4866 QualType(), QualType()); 4867 Types.push_back(Spec); 4868 TemplateSpecializationTypes.InsertNode(Spec, InsertPos); 4869 } 4870 4871 assert(Spec->isDependentType() && 4872 "Non-dependent template-id type must have a canonical type"); 4873 return QualType(Spec, 0); 4874 } 4875 4876 QualType ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword, 4877 NestedNameSpecifier *NNS, 4878 QualType NamedType, 4879 TagDecl *OwnedTagDecl) const { 4880 llvm::FoldingSetNodeID ID; 4881 ElaboratedType::Profile(ID, Keyword, NNS, NamedType, OwnedTagDecl); 4882 4883 void *InsertPos = nullptr; 4884 ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4885 if (T) 4886 return QualType(T, 0); 4887 4888 QualType Canon = NamedType; 4889 if (!Canon.isCanonical()) { 4890 Canon = getCanonicalType(NamedType); 4891 ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos); 4892 assert(!CheckT && "Elaborated canonical type broken"); 4893 (void)CheckT; 4894 } 4895 4896 void *Mem = Allocate(ElaboratedType::totalSizeToAlloc<TagDecl *>(!!OwnedTagDecl), 4897 TypeAlignment); 4898 T = new (Mem) ElaboratedType(Keyword, NNS, NamedType, Canon, OwnedTagDecl); 4899 4900 Types.push_back(T); 4901 ElaboratedTypes.InsertNode(T, InsertPos); 4902 return QualType(T, 0); 4903 } 4904 4905 QualType 4906 ASTContext::getParenType(QualType InnerType) const { 4907 llvm::FoldingSetNodeID ID; 4908 ParenType::Profile(ID, InnerType); 4909 4910 void *InsertPos = nullptr; 4911 ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 4912 if (T) 4913 return QualType(T, 0); 4914 4915 QualType Canon = InnerType; 4916 if (!Canon.isCanonical()) { 4917 Canon = getCanonicalType(InnerType); 4918 ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos); 4919 assert(!CheckT && "Paren canonical type broken"); 4920 (void)CheckT; 4921 } 4922 4923 T = new (*this, TypeAlignment) ParenType(InnerType, Canon); 4924 Types.push_back(T); 4925 ParenTypes.InsertNode(T, InsertPos); 4926 return QualType(T, 0); 4927 } 4928 4929 QualType 4930 ASTContext::getMacroQualifiedType(QualType UnderlyingTy, 4931 const IdentifierInfo *MacroII) const { 4932 QualType Canon = UnderlyingTy; 4933 if (!Canon.isCanonical()) 4934 Canon = getCanonicalType(UnderlyingTy); 4935 4936 auto *newType = new (*this, TypeAlignment) 4937 MacroQualifiedType(UnderlyingTy, Canon, MacroII); 4938 Types.push_back(newType); 4939 return QualType(newType, 0); 4940 } 4941 4942 QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword, 4943 NestedNameSpecifier *NNS, 4944 const IdentifierInfo *Name, 4945 QualType Canon) const { 4946 if (Canon.isNull()) { 4947 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 4948 if (CanonNNS != NNS) 4949 Canon = getDependentNameType(Keyword, CanonNNS, Name); 4950 } 4951 4952 llvm::FoldingSetNodeID ID; 4953 DependentNameType::Profile(ID, Keyword, NNS, Name); 4954 4955 void *InsertPos = nullptr; 4956 DependentNameType *T 4957 = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos); 4958 if (T) 4959 return QualType(T, 0); 4960 4961 T = new (*this, TypeAlignment) DependentNameType(Keyword, NNS, Name, Canon); 4962 Types.push_back(T); 4963 DependentNameTypes.InsertNode(T, InsertPos); 4964 return QualType(T, 0); 4965 } 4966 4967 QualType 4968 
ASTContext::getDependentTemplateSpecializationType( 4969 ElaboratedTypeKeyword Keyword, 4970 NestedNameSpecifier *NNS, 4971 const IdentifierInfo *Name, 4972 const TemplateArgumentListInfo &Args) const { 4973 // TODO: avoid this copy 4974 SmallVector<TemplateArgument, 16> ArgCopy; 4975 for (unsigned I = 0, E = Args.size(); I != E; ++I) 4976 ArgCopy.push_back(Args[I].getArgument()); 4977 return getDependentTemplateSpecializationType(Keyword, NNS, Name, ArgCopy); 4978 } 4979 4980 QualType 4981 ASTContext::getDependentTemplateSpecializationType( 4982 ElaboratedTypeKeyword Keyword, 4983 NestedNameSpecifier *NNS, 4984 const IdentifierInfo *Name, 4985 ArrayRef<TemplateArgument> Args) const { 4986 assert((!NNS || NNS->isDependent()) && 4987 "nested-name-specifier must be dependent"); 4988 4989 llvm::FoldingSetNodeID ID; 4990 DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS, 4991 Name, Args); 4992 4993 void *InsertPos = nullptr; 4994 DependentTemplateSpecializationType *T 4995 = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 4996 if (T) 4997 return QualType(T, 0); 4998 4999 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 5000 5001 ElaboratedTypeKeyword CanonKeyword = Keyword; 5002 if (Keyword == ETK_None) CanonKeyword = ETK_Typename; 5003 5004 SmallVector<TemplateArgument, 16> CanonArgs; 5005 bool AnyNonCanonArgs = 5006 ::getCanonicalTemplateArguments(*this, Args, CanonArgs); 5007 5008 QualType Canon; 5009 if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) { 5010 Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS, 5011 Name, 5012 CanonArgs); 5013 5014 // Find the insert position again. 5015 DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos); 5016 } 5017 5018 void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) + 5019 sizeof(TemplateArgument) * Args.size()), 5020 TypeAlignment); 5021 T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS, 5022 Name, Args, Canon); 5023 Types.push_back(T); 5024 DependentTemplateSpecializationTypes.InsertNode(T, InsertPos); 5025 return QualType(T, 0); 5026 } 5027 5028 TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) { 5029 TemplateArgument Arg; 5030 if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) { 5031 QualType ArgType = getTypeDeclType(TTP); 5032 if (TTP->isParameterPack()) 5033 ArgType = getPackExpansionType(ArgType, None); 5034 5035 Arg = TemplateArgument(ArgType); 5036 } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) { 5037 QualType T = 5038 NTTP->getType().getNonPackExpansionType().getNonLValueExprType(*this); 5039 // For class NTTPs, ensure we include the 'const' so the type matches that 5040 // of a real template argument. 5041 // FIXME: It would be more faithful to model this as something like an 5042 // lvalue-to-rvalue conversion applied to a const-qualified lvalue. 
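// E.g. (illustrative only): for a C++20 class-type non-type template parameter
// such as 'template <Point P> ...', the DeclRefExpr built below is given the
// type 'const Point' so it matches the type of an actual template argument.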
5043 if (T->isRecordType()) 5044 T.addConst(); 5045 Expr *E = new (*this) DeclRefExpr( 5046 *this, NTTP, /*enclosing*/ false, T, 5047 Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation()); 5048 5049 if (NTTP->isParameterPack()) 5050 E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(), 5051 None); 5052 Arg = TemplateArgument(E); 5053 } else { 5054 auto *TTP = cast<TemplateTemplateParmDecl>(Param); 5055 if (TTP->isParameterPack()) 5056 Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>()); 5057 else 5058 Arg = TemplateArgument(TemplateName(TTP)); 5059 } 5060 5061 if (Param->isTemplateParameterPack()) 5062 Arg = TemplateArgument::CreatePackCopy(*this, Arg); 5063 5064 return Arg; 5065 } 5066 5067 void 5068 ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params, 5069 SmallVectorImpl<TemplateArgument> &Args) { 5070 Args.reserve(Args.size() + Params->size()); 5071 5072 for (NamedDecl *Param : *Params) 5073 Args.push_back(getInjectedTemplateArg(Param)); 5074 } 5075 5076 QualType ASTContext::getPackExpansionType(QualType Pattern, 5077 Optional<unsigned> NumExpansions, 5078 bool ExpectPackInType) { 5079 assert((!ExpectPackInType || Pattern->containsUnexpandedParameterPack()) && 5080 "Pack expansions must expand one or more parameter packs"); 5081 5082 llvm::FoldingSetNodeID ID; 5083 PackExpansionType::Profile(ID, Pattern, NumExpansions); 5084 5085 void *InsertPos = nullptr; 5086 PackExpansionType *T = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5087 if (T) 5088 return QualType(T, 0); 5089 5090 QualType Canon; 5091 if (!Pattern.isCanonical()) { 5092 Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions, 5093 /*ExpectPackInType=*/false); 5094 5095 // Find the insert position again, in case we inserted an element into 5096 // PackExpansionTypes and invalidated our insert position. 5097 PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos); 5098 } 5099 5100 T = new (*this, TypeAlignment) 5101 PackExpansionType(Pattern, Canon, NumExpansions); 5102 Types.push_back(T); 5103 PackExpansionTypes.InsertNode(T, InsertPos); 5104 return QualType(T, 0); 5105 } 5106 5107 /// CmpProtocolNames - Comparison predicate for sorting protocols 5108 /// alphabetically. 5109 static int CmpProtocolNames(ObjCProtocolDecl *const *LHS, 5110 ObjCProtocolDecl *const *RHS) { 5111 return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName()); 5112 } 5113 5114 static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) { 5115 if (Protocols.empty()) return true; 5116 5117 if (Protocols[0]->getCanonicalDecl() != Protocols[0]) 5118 return false; 5119 5120 for (unsigned i = 1; i != Protocols.size(); ++i) 5121 if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 || 5122 Protocols[i]->getCanonicalDecl() != Protocols[i]) 5123 return false; 5124 return true; 5125 } 5126 5127 static void 5128 SortAndUniqueProtocols(SmallVectorImpl<ObjCProtocolDecl *> &Protocols) { 5129 // Sort protocols, keyed by name. 5130 llvm::array_pod_sort(Protocols.begin(), Protocols.end(), CmpProtocolNames); 5131 5132 // Canonicalize. 5133 for (ObjCProtocolDecl *&P : Protocols) 5134 P = P->getCanonicalDecl(); 5135 5136 // Remove duplicates. 
5137 auto ProtocolsEnd = std::unique(Protocols.begin(), Protocols.end()); 5138 Protocols.erase(ProtocolsEnd, Protocols.end()); 5139 } 5140 5141 QualType ASTContext::getObjCObjectType(QualType BaseType, 5142 ObjCProtocolDecl * const *Protocols, 5143 unsigned NumProtocols) const { 5144 return getObjCObjectType(BaseType, {}, 5145 llvm::makeArrayRef(Protocols, NumProtocols), 5146 /*isKindOf=*/false); 5147 } 5148 5149 QualType ASTContext::getObjCObjectType( 5150 QualType baseType, 5151 ArrayRef<QualType> typeArgs, 5152 ArrayRef<ObjCProtocolDecl *> protocols, 5153 bool isKindOf) const { 5154 // If the base type is an interface and there aren't any protocols or 5155 // type arguments to add, then the interface type will do just fine. 5156 if (typeArgs.empty() && protocols.empty() && !isKindOf && 5157 isa<ObjCInterfaceType>(baseType)) 5158 return baseType; 5159 5160 // Look in the folding set for an existing type. 5161 llvm::FoldingSetNodeID ID; 5162 ObjCObjectTypeImpl::Profile(ID, baseType, typeArgs, protocols, isKindOf); 5163 void *InsertPos = nullptr; 5164 if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos)) 5165 return QualType(QT, 0); 5166 5167 // Determine the type arguments to be used for canonicalization, 5168 // which may be explicitly specified here or written on the base 5169 // type. 5170 ArrayRef<QualType> effectiveTypeArgs = typeArgs; 5171 if (effectiveTypeArgs.empty()) { 5172 if (const auto *baseObject = baseType->getAs<ObjCObjectType>()) 5173 effectiveTypeArgs = baseObject->getTypeArgs(); 5174 } 5175 5176 // Build the canonical type, which has the canonical base type and a 5177 // sorted-and-uniqued list of protocols and the type arguments 5178 // canonicalized. 5179 QualType canonical; 5180 bool typeArgsAreCanonical = llvm::all_of( 5181 effectiveTypeArgs, [&](QualType type) { return type.isCanonical(); }); 5182 bool protocolsSorted = areSortedAndUniqued(protocols); 5183 if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) { 5184 // Determine the canonical type arguments. 5185 ArrayRef<QualType> canonTypeArgs; 5186 SmallVector<QualType, 4> canonTypeArgsVec; 5187 if (!typeArgsAreCanonical) { 5188 canonTypeArgsVec.reserve(effectiveTypeArgs.size()); 5189 for (auto typeArg : effectiveTypeArgs) 5190 canonTypeArgsVec.push_back(getCanonicalType(typeArg)); 5191 canonTypeArgs = canonTypeArgsVec; 5192 } else { 5193 canonTypeArgs = effectiveTypeArgs; 5194 } 5195 5196 ArrayRef<ObjCProtocolDecl *> canonProtocols; 5197 SmallVector<ObjCProtocolDecl*, 8> canonProtocolsVec; 5198 if (!protocolsSorted) { 5199 canonProtocolsVec.append(protocols.begin(), protocols.end()); 5200 SortAndUniqueProtocols(canonProtocolsVec); 5201 canonProtocols = canonProtocolsVec; 5202 } else { 5203 canonProtocols = protocols; 5204 } 5205 5206 canonical = getObjCObjectType(getCanonicalType(baseType), canonTypeArgs, 5207 canonProtocols, isKindOf); 5208 5209 // Regenerate InsertPos. 5210 ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos); 5211 } 5212 5213 unsigned size = sizeof(ObjCObjectTypeImpl); 5214 size += typeArgs.size() * sizeof(QualType); 5215 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5216 void *mem = Allocate(size, TypeAlignment); 5217 auto *T = 5218 new (mem) ObjCObjectTypeImpl(canonical, baseType, typeArgs, protocols, 5219 isKindOf); 5220 5221 Types.push_back(T); 5222 ObjCObjectTypes.InsertNode(T, InsertPos); 5223 return QualType(T, 0); 5224 } 5225 5226 /// Apply Objective-C protocol qualifiers to the given type. 
5227 /// If this is for the canonical type of a type parameter, we can apply 5228 /// protocol qualifiers on the ObjCObjectPointerType. 5229 QualType 5230 ASTContext::applyObjCProtocolQualifiers(QualType type, 5231 ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError, 5232 bool allowOnPointerType) const { 5233 hasError = false; 5234 5235 if (const auto *objT = dyn_cast<ObjCTypeParamType>(type.getTypePtr())) { 5236 return getObjCTypeParamType(objT->getDecl(), protocols); 5237 } 5238 5239 // Apply protocol qualifiers to ObjCObjectPointerType. 5240 if (allowOnPointerType) { 5241 if (const auto *objPtr = 5242 dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) { 5243 const ObjCObjectType *objT = objPtr->getObjectType(); 5244 // Merge protocol lists and construct ObjCObjectType. 5245 SmallVector<ObjCProtocolDecl*, 8> protocolsVec; 5246 protocolsVec.append(objT->qual_begin(), 5247 objT->qual_end()); 5248 protocolsVec.append(protocols.begin(), protocols.end()); 5249 ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec; 5250 type = getObjCObjectType( 5251 objT->getBaseType(), 5252 objT->getTypeArgsAsWritten(), 5253 protocols, 5254 objT->isKindOfTypeAsWritten()); 5255 return getObjCObjectPointerType(type); 5256 } 5257 } 5258 5259 // Apply protocol qualifiers to ObjCObjectType. 5260 if (const auto *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){ 5261 // FIXME: Check for protocols to which the class type is already 5262 // known to conform. 5263 5264 return getObjCObjectType(objT->getBaseType(), 5265 objT->getTypeArgsAsWritten(), 5266 protocols, 5267 objT->isKindOfTypeAsWritten()); 5268 } 5269 5270 // If the canonical type is ObjCObjectType, ... 5271 if (type->isObjCObjectType()) { 5272 // Silently overwrite any existing protocol qualifiers. 5273 // TODO: determine whether that's the right thing to do. 5274 5275 // FIXME: Check for protocols to which the class type is already 5276 // known to conform. 5277 return getObjCObjectType(type, {}, protocols, false); 5278 } 5279 5280 // id<protocol-list> 5281 if (type->isObjCIdType()) { 5282 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5283 type = getObjCObjectType(ObjCBuiltinIdTy, {}, protocols, 5284 objPtr->isKindOfType()); 5285 return getObjCObjectPointerType(type); 5286 } 5287 5288 // Class<protocol-list> 5289 if (type->isObjCClassType()) { 5290 const auto *objPtr = type->castAs<ObjCObjectPointerType>(); 5291 type = getObjCObjectType(ObjCBuiltinClassTy, {}, protocols, 5292 objPtr->isKindOfType()); 5293 return getObjCObjectPointerType(type); 5294 } 5295 5296 hasError = true; 5297 return type; 5298 } 5299 5300 QualType 5301 ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl, 5302 ArrayRef<ObjCProtocolDecl *> protocols) const { 5303 // Look in the folding set for an existing type. 5304 llvm::FoldingSetNodeID ID; 5305 ObjCTypeParamType::Profile(ID, Decl, Decl->getUnderlyingType(), protocols); 5306 void *InsertPos = nullptr; 5307 if (ObjCTypeParamType *TypeParam = 5308 ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos)) 5309 return QualType(TypeParam, 0); 5310 5311 // We canonicalize to the underlying type. 5312 QualType Canonical = getCanonicalType(Decl->getUnderlyingType()); 5313 if (!protocols.empty()) { 5314 // Apply the protocol qualifers. 
5315 bool hasError; 5316 Canonical = getCanonicalType(applyObjCProtocolQualifiers( 5317 Canonical, protocols, hasError, true /*allowOnPointerType*/)); 5318 assert(!hasError && "Error when apply protocol qualifier to bound type"); 5319 } 5320 5321 unsigned size = sizeof(ObjCTypeParamType); 5322 size += protocols.size() * sizeof(ObjCProtocolDecl *); 5323 void *mem = Allocate(size, TypeAlignment); 5324 auto *newType = new (mem) ObjCTypeParamType(Decl, Canonical, protocols); 5325 5326 Types.push_back(newType); 5327 ObjCTypeParamTypes.InsertNode(newType, InsertPos); 5328 return QualType(newType, 0); 5329 } 5330 5331 void ASTContext::adjustObjCTypeParamBoundType(const ObjCTypeParamDecl *Orig, 5332 ObjCTypeParamDecl *New) const { 5333 New->setTypeSourceInfo(getTrivialTypeSourceInfo(Orig->getUnderlyingType())); 5334 // Update TypeForDecl after updating TypeSourceInfo. 5335 auto NewTypeParamTy = cast<ObjCTypeParamType>(New->getTypeForDecl()); 5336 SmallVector<ObjCProtocolDecl *, 8> protocols; 5337 protocols.append(NewTypeParamTy->qual_begin(), NewTypeParamTy->qual_end()); 5338 QualType UpdatedTy = getObjCTypeParamType(New, protocols); 5339 New->setTypeForDecl(UpdatedTy.getTypePtr()); 5340 } 5341 5342 /// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's 5343 /// protocol list adopt all protocols in QT's qualified-id protocol 5344 /// list. 5345 bool ASTContext::ObjCObjectAdoptsQTypeProtocols(QualType QT, 5346 ObjCInterfaceDecl *IC) { 5347 if (!QT->isObjCQualifiedIdType()) 5348 return false; 5349 5350 if (const auto *OPT = QT->getAs<ObjCObjectPointerType>()) { 5351 // If both the right and left sides have qualifiers. 5352 for (auto *Proto : OPT->quals()) { 5353 if (!IC->ClassImplementsProtocol(Proto, false)) 5354 return false; 5355 } 5356 return true; 5357 } 5358 return false; 5359 } 5360 5361 /// QIdProtocolsAdoptObjCObjectProtocols - Checks that protocols in 5362 /// QT's qualified-id protocol list adopt all protocols in IDecl's list 5363 /// of protocols. 5364 bool ASTContext::QIdProtocolsAdoptObjCObjectProtocols(QualType QT, 5365 ObjCInterfaceDecl *IDecl) { 5366 if (!QT->isObjCQualifiedIdType()) 5367 return false; 5368 const auto *OPT = QT->getAs<ObjCObjectPointerType>(); 5369 if (!OPT) 5370 return false; 5371 if (!IDecl->hasDefinition()) 5372 return false; 5373 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocols; 5374 CollectInheritedProtocols(IDecl, InheritedProtocols); 5375 if (InheritedProtocols.empty()) 5376 return false; 5377 // Check that if every protocol in list of id<plist> conforms to a protocol 5378 // of IDecl's, then bridge casting is ok. 5379 bool Conforms = false; 5380 for (auto *Proto : OPT->quals()) { 5381 Conforms = false; 5382 for (auto *PI : InheritedProtocols) { 5383 if (ProtocolCompatibleWithProtocol(Proto, PI)) { 5384 Conforms = true; 5385 break; 5386 } 5387 } 5388 if (!Conforms) 5389 break; 5390 } 5391 if (Conforms) 5392 return true; 5393 5394 for (auto *PI : InheritedProtocols) { 5395 // If both the right and left sides have qualifiers. 5396 bool Adopts = false; 5397 for (auto *Proto : OPT->quals()) { 5398 // return 'true' if 'PI' is in the inheritance hierarchy of Proto 5399 if ((Adopts = ProtocolCompatibleWithProtocol(PI, Proto))) 5400 break; 5401 } 5402 if (!Adopts) 5403 return false; 5404 } 5405 return true; 5406 } 5407 5408 /// getObjCObjectPointerType - Return a ObjCObjectPointerType type for 5409 /// the given object type. 
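/// An illustrative sketch, with hypothetical 'Ctx' and 'NSStringDecl' variables
/// that are not part of this file:
/// \code
///   QualType Obj = Ctx.getObjCInterfaceType(NSStringDecl);
///   QualType Ptr = Ctx.getObjCObjectPointerType(Obj); // roughly 'NSString *'
/// \endcode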
5410 QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
5411 llvm::FoldingSetNodeID ID;
5412 ObjCObjectPointerType::Profile(ID, ObjectT);
5413
5414 void *InsertPos = nullptr;
5415 if (ObjCObjectPointerType *QT =
5416 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
5417 return QualType(QT, 0);
5418
5419 // Find the canonical object type.
5420 QualType Canonical;
5421 if (!ObjectT.isCanonical()) {
5422 Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
5423
5424 // Regenerate InsertPos.
5425 ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
5426 }
5427
5428 // No match.
5429 void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
5430 auto *QType =
5431 new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
5432
5433 Types.push_back(QType);
5434 ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
5435 return QualType(QType, 0);
5436 }
5437
5438 /// getObjCInterfaceType - Return the unique reference to the type for the
5439 /// specified ObjC interface decl. The list of protocols is optional.
5440 QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
5441 ObjCInterfaceDecl *PrevDecl) const {
5442 if (Decl->TypeForDecl)
5443 return QualType(Decl->TypeForDecl, 0);
5444
5445 if (PrevDecl) {
5446 assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
5447 Decl->TypeForDecl = PrevDecl->TypeForDecl;
5448 return QualType(PrevDecl->TypeForDecl, 0);
5449 }
5450
5451 // Prefer the definition, if there is one.
5452 if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
5453 Decl = Def;
5454
5455 void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
5456 auto *T = new (Mem) ObjCInterfaceType(Decl);
5457 Decl->TypeForDecl = T;
5458 Types.push_back(T);
5459 return QualType(T, 0);
5460 }
5461
5462 /// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
5463 /// TypeOfExprType ASTs (since expressions are never shared). For example,
5464 /// multiple declarations that refer to "typeof(x)" all contain different
5465 /// DeclRefExprs. This doesn't affect the type checker, since it operates
5466 /// on canonical types (which are always unique).
5467 QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
5468 TypeOfExprType *toe;
5469 if (tofExpr->isTypeDependent()) {
5470 llvm::FoldingSetNodeID ID;
5471 DependentTypeOfExprType::Profile(ID, *this, tofExpr);
5472
5473 void *InsertPos = nullptr;
5474 DependentTypeOfExprType *Canon
5475 = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
5476 if (Canon) {
5477 // We already have a "canonical" version of an identical, dependent
5478 // typeof(expr) type. Use that as our canonical type.
5479 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
5480 QualType((TypeOfExprType*)Canon, 0));
5481 } else {
5482 // Build a new, canonical typeof(expr) type.
5483 Canon
5484 = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
5485 DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
5486 toe = Canon;
5487 }
5488 } else {
5489 QualType Canonical = getCanonicalType(tofExpr->getType());
5490 toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
5491 }
5492 Types.push_back(toe);
5493 return QualType(toe, 0);
5494 }
5495
5496 /// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
5497 /// TypeOfType nodes. The only motivation to unique these nodes would be
5498 /// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
5499 /// an issue.
This doesn't affect the type checker, since it operates 5500 /// on canonical types (which are always unique). 5501 QualType ASTContext::getTypeOfType(QualType tofType) const { 5502 QualType Canonical = getCanonicalType(tofType); 5503 auto *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical); 5504 Types.push_back(tot); 5505 return QualType(tot, 0); 5506 } 5507 5508 /// getReferenceQualifiedType - Given an expr, will return the type for 5509 /// that expression, as in [dcl.type.simple]p4 but without taking id-expressions 5510 /// and class member access into account. 5511 QualType ASTContext::getReferenceQualifiedType(const Expr *E) const { 5512 // C++11 [dcl.type.simple]p4: 5513 // [...] 5514 QualType T = E->getType(); 5515 switch (E->getValueKind()) { 5516 // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the 5517 // type of e; 5518 case VK_XValue: 5519 return getRValueReferenceType(T); 5520 // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the 5521 // type of e; 5522 case VK_LValue: 5523 return getLValueReferenceType(T); 5524 // - otherwise, decltype(e) is the type of e. 5525 case VK_PRValue: 5526 return T; 5527 } 5528 llvm_unreachable("Unknown value kind"); 5529 } 5530 5531 /// Unlike many "get<Type>" functions, we don't unique DecltypeType 5532 /// nodes. This would never be helpful, since each such type has its own 5533 /// expression, and would not give a significant memory saving, since there 5534 /// is an Expr tree under each such type. 5535 QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const { 5536 DecltypeType *dt; 5537 5538 // C++11 [temp.type]p2: 5539 // If an expression e involves a template parameter, decltype(e) denotes a 5540 // unique dependent type. Two such decltype-specifiers refer to the same 5541 // type only if their expressions are equivalent (14.5.6.1). 5542 if (e->isInstantiationDependent()) { 5543 llvm::FoldingSetNodeID ID; 5544 DependentDecltypeType::Profile(ID, *this, e); 5545 5546 void *InsertPos = nullptr; 5547 DependentDecltypeType *Canon 5548 = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos); 5549 if (!Canon) { 5550 // Build a new, canonical decltype(expr) type. 5551 Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e); 5552 DependentDecltypeTypes.InsertNode(Canon, InsertPos); 5553 } 5554 dt = new (*this, TypeAlignment) 5555 DecltypeType(e, UnderlyingType, QualType((DecltypeType *)Canon, 0)); 5556 } else { 5557 dt = new (*this, TypeAlignment) 5558 DecltypeType(e, UnderlyingType, getCanonicalType(UnderlyingType)); 5559 } 5560 Types.push_back(dt); 5561 return QualType(dt, 0); 5562 } 5563 5564 /// getUnaryTransformationType - We don't unique these, since the memory 5565 /// savings are minimal and these are rare. 5566 QualType ASTContext::getUnaryTransformType(QualType BaseType, 5567 QualType UnderlyingType, 5568 UnaryTransformType::UTTKind Kind) 5569 const { 5570 UnaryTransformType *ut = nullptr; 5571 5572 if (BaseType->isDependentType()) { 5573 // Look in the folding set for an existing type. 5574 llvm::FoldingSetNodeID ID; 5575 DependentUnaryTransformType::Profile(ID, getCanonicalType(BaseType), Kind); 5576 5577 void *InsertPos = nullptr; 5578 DependentUnaryTransformType *Canon 5579 = DependentUnaryTransformTypes.FindNodeOrInsertPos(ID, InsertPos); 5580 5581 if (!Canon) { 5582 // Build a new, canonical __underlying_type(type) type. 
5583 Canon = new (*this, TypeAlignment) 5584 DependentUnaryTransformType(*this, getCanonicalType(BaseType), 5585 Kind); 5586 DependentUnaryTransformTypes.InsertNode(Canon, InsertPos); 5587 } 5588 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5589 QualType(), Kind, 5590 QualType(Canon, 0)); 5591 } else { 5592 QualType CanonType = getCanonicalType(UnderlyingType); 5593 ut = new (*this, TypeAlignment) UnaryTransformType (BaseType, 5594 UnderlyingType, Kind, 5595 CanonType); 5596 } 5597 Types.push_back(ut); 5598 return QualType(ut, 0); 5599 } 5600 5601 QualType ASTContext::getAutoTypeInternal( 5602 QualType DeducedType, AutoTypeKeyword Keyword, bool IsDependent, 5603 bool IsPack, ConceptDecl *TypeConstraintConcept, 5604 ArrayRef<TemplateArgument> TypeConstraintArgs, bool IsCanon) const { 5605 if (DeducedType.isNull() && Keyword == AutoTypeKeyword::Auto && 5606 !TypeConstraintConcept && !IsDependent) 5607 return getAutoDeductType(); 5608 5609 // Look in the folding set for an existing type. 5610 void *InsertPos = nullptr; 5611 llvm::FoldingSetNodeID ID; 5612 AutoType::Profile(ID, *this, DeducedType, Keyword, IsDependent, 5613 TypeConstraintConcept, TypeConstraintArgs); 5614 if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos)) 5615 return QualType(AT, 0); 5616 5617 QualType Canon; 5618 if (!IsCanon) { 5619 if (DeducedType.isNull()) { 5620 SmallVector<TemplateArgument, 4> CanonArgs; 5621 bool AnyNonCanonArgs = 5622 ::getCanonicalTemplateArguments(*this, TypeConstraintArgs, CanonArgs); 5623 if (AnyNonCanonArgs) { 5624 Canon = getAutoTypeInternal(QualType(), Keyword, IsDependent, IsPack, 5625 TypeConstraintConcept, CanonArgs, true); 5626 // Find the insert position again. 5627 AutoTypes.FindNodeOrInsertPos(ID, InsertPos); 5628 } 5629 } else { 5630 Canon = DeducedType.getCanonicalType(); 5631 } 5632 } 5633 5634 void *Mem = Allocate(sizeof(AutoType) + 5635 sizeof(TemplateArgument) * TypeConstraintArgs.size(), 5636 TypeAlignment); 5637 auto *AT = new (Mem) AutoType( 5638 DeducedType, Keyword, 5639 (IsDependent ? TypeDependence::DependentInstantiation 5640 : TypeDependence::None) | 5641 (IsPack ? TypeDependence::UnexpandedPack : TypeDependence::None), 5642 Canon, TypeConstraintConcept, TypeConstraintArgs); 5643 Types.push_back(AT); 5644 AutoTypes.InsertNode(AT, InsertPos); 5645 return QualType(AT, 0); 5646 } 5647 5648 /// getAutoType - Return the uniqued reference to the 'auto' type which has been 5649 /// deduced to the given type, or to the canonical undeduced 'auto' type, or the 5650 /// canonical deduced-but-dependent 'auto' type. 5651 QualType 5652 ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword, 5653 bool IsDependent, bool IsPack, 5654 ConceptDecl *TypeConstraintConcept, 5655 ArrayRef<TemplateArgument> TypeConstraintArgs) const { 5656 assert((!IsPack || IsDependent) && "only use IsPack for a dependent pack"); 5657 assert((!IsDependent || DeducedType.isNull()) && 5658 "A dependent auto should be undeduced"); 5659 return getAutoTypeInternal(DeducedType, Keyword, IsDependent, IsPack, 5660 TypeConstraintConcept, TypeConstraintArgs); 5661 } 5662 5663 /// Return the uniqued reference to the deduced template specialization type 5664 /// which has been deduced to the given type, or to the canonical undeduced 5665 /// such type, or the canonical deduced-but-dependent such type. 
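/// For class template argument deduction, for example, the type written as a
/// bare template name (as in 'pair p{1, 2};') is represented this way until
/// deduction fills in the deduced type. An illustrative sketch, with
/// hypothetical 'Ctx' and 'PairTN' variables:
/// \code
///   QualType Undeduced = Ctx.getDeducedTemplateSpecializationType(
///       PairTN, /*DeducedType=*/QualType(), /*IsDependent=*/false);
/// \endcode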
5666 QualType ASTContext::getDeducedTemplateSpecializationType( 5667 TemplateName Template, QualType DeducedType, bool IsDependent) const { 5668 // Look in the folding set for an existing type. 5669 void *InsertPos = nullptr; 5670 llvm::FoldingSetNodeID ID; 5671 DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType, 5672 IsDependent); 5673 if (DeducedTemplateSpecializationType *DTST = 5674 DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos)) 5675 return QualType(DTST, 0); 5676 5677 auto *DTST = new (*this, TypeAlignment) 5678 DeducedTemplateSpecializationType(Template, DeducedType, IsDependent); 5679 llvm::FoldingSetNodeID TempID; 5680 DTST->Profile(TempID); 5681 assert(ID == TempID && "ID does not match"); 5682 Types.push_back(DTST); 5683 DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos); 5684 return QualType(DTST, 0); 5685 } 5686 5687 /// getAtomicType - Return the uniqued reference to the atomic type for 5688 /// the given value type. 5689 QualType ASTContext::getAtomicType(QualType T) const { 5690 // Unique pointers, to guarantee there is only one pointer of a particular 5691 // structure. 5692 llvm::FoldingSetNodeID ID; 5693 AtomicType::Profile(ID, T); 5694 5695 void *InsertPos = nullptr; 5696 if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos)) 5697 return QualType(AT, 0); 5698 5699 // If the atomic value type isn't canonical, this won't be a canonical type 5700 // either, so fill in the canonical type field. 5701 QualType Canonical; 5702 if (!T.isCanonical()) { 5703 Canonical = getAtomicType(getCanonicalType(T)); 5704 5705 // Get the new insert position for the node we care about. 5706 AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos); 5707 assert(!NewIP && "Shouldn't be in the map!"); (void)NewIP; 5708 } 5709 auto *New = new (*this, TypeAlignment) AtomicType(T, Canonical); 5710 Types.push_back(New); 5711 AtomicTypes.InsertNode(New, InsertPos); 5712 return QualType(New, 0); 5713 } 5714 5715 /// getAutoDeductType - Get type pattern for deducing against 'auto'. 5716 QualType ASTContext::getAutoDeductType() const { 5717 if (AutoDeductTy.isNull()) 5718 AutoDeductTy = QualType(new (*this, TypeAlignment) 5719 AutoType(QualType(), AutoTypeKeyword::Auto, 5720 TypeDependence::None, QualType(), 5721 /*concept*/ nullptr, /*args*/ {}), 5722 0); 5723 return AutoDeductTy; 5724 } 5725 5726 /// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'. 5727 QualType ASTContext::getAutoRRefDeductType() const { 5728 if (AutoRRefDeductTy.isNull()) 5729 AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType()); 5730 assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern"); 5731 return AutoRRefDeductTy; 5732 } 5733 5734 /// getTagDeclType - Return the unique reference to the type for the 5735 /// specified TagDecl (struct/union/class/enum) decl. 5736 QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { 5737 assert(Decl); 5738 // FIXME: What is the design on getTagDeclType when it requires casting 5739 // away const? mutable? 5740 return getTypeDeclType(const_cast<TagDecl*>(Decl)); 5741 } 5742 5743 /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result 5744 /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and 5745 /// needs to agree with the definition in <stddef.h>. 
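/// On typical 64-bit targets, for instance, this is a 64-bit unsigned integer
/// type (commonly 'unsigned long', or 'unsigned long long' on LLP64 targets).
/// An illustrative sketch, with a hypothetical 'Ctx' variable:
/// \code
///   CanQualType SizeT = Ctx.getSizeType(); // the type of 'sizeof(x)'
/// \endcode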
5746 CanQualType ASTContext::getSizeType() const { 5747 return getFromTargetType(Target->getSizeType()); 5748 } 5749 5750 /// Return the unique signed counterpart of the integer type 5751 /// corresponding to size_t. 5752 CanQualType ASTContext::getSignedSizeType() const { 5753 return getFromTargetType(Target->getSignedSizeType()); 5754 } 5755 5756 /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). 5757 CanQualType ASTContext::getIntMaxType() const { 5758 return getFromTargetType(Target->getIntMaxType()); 5759 } 5760 5761 /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5). 5762 CanQualType ASTContext::getUIntMaxType() const { 5763 return getFromTargetType(Target->getUIntMaxType()); 5764 } 5765 5766 /// getSignedWCharType - Return the type of "signed wchar_t". 5767 /// Used when in C++, as a GCC extension. 5768 QualType ASTContext::getSignedWCharType() const { 5769 // FIXME: derive from "Target" ? 5770 return WCharTy; 5771 } 5772 5773 /// getUnsignedWCharType - Return the type of "unsigned wchar_t". 5774 /// Used when in C++, as a GCC extension. 5775 QualType ASTContext::getUnsignedWCharType() const { 5776 // FIXME: derive from "Target" ? 5777 return UnsignedIntTy; 5778 } 5779 5780 QualType ASTContext::getIntPtrType() const { 5781 return getFromTargetType(Target->getIntPtrType()); 5782 } 5783 5784 QualType ASTContext::getUIntPtrType() const { 5785 return getCorrespondingUnsignedType(getIntPtrType()); 5786 } 5787 5788 /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) 5789 /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). 5790 QualType ASTContext::getPointerDiffType() const { 5791 return getFromTargetType(Target->getPtrDiffType(0)); 5792 } 5793 5794 /// Return the unique unsigned counterpart of "ptrdiff_t" 5795 /// integer type. The standard (C11 7.21.6.1p7) refers to this type 5796 /// in the definition of %tu format specifier. 5797 QualType ASTContext::getUnsignedPointerDiffType() const { 5798 return getFromTargetType(Target->getUnsignedPtrDiffType(0)); 5799 } 5800 5801 /// Return the unique type for "pid_t" defined in 5802 /// <sys/types.h>. We need this to compute the correct type for vfork(). 5803 QualType ASTContext::getProcessIDType() const { 5804 return getFromTargetType(Target->getProcessIDType()); 5805 } 5806 5807 //===----------------------------------------------------------------------===// 5808 // Type Operators 5809 //===----------------------------------------------------------------------===// 5810 5811 CanQualType ASTContext::getCanonicalParamType(QualType T) const { 5812 // Push qualifiers into arrays, and then discard any remaining 5813 // qualifiers. 5814 T = getCanonicalType(T); 5815 T = getVariableArrayDecayedType(T); 5816 const Type *Ty = T.getTypePtr(); 5817 QualType Result; 5818 if (isa<ArrayType>(Ty)) { 5819 Result = getArrayDecayedType(QualType(Ty,0)); 5820 } else if (isa<FunctionType>(Ty)) { 5821 Result = getPointerType(QualType(Ty, 0)); 5822 } else { 5823 Result = QualType(Ty, 0); 5824 } 5825 5826 return CanQualType::CreateUnsafe(Result); 5827 } 5828 5829 QualType ASTContext::getUnqualifiedArrayType(QualType type, 5830 Qualifiers &quals) { 5831 SplitQualType splitType = type.getSplitUnqualifiedType(); 5832 5833 // FIXME: getSplitUnqualifiedType() actually walks all the way to 5834 // the unqualified desugared type and then drops it on the floor. 5835 // We then have to strip that sugar back off with 5836 // getUnqualifiedDesugaredType(), which is silly. 
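// E.g. (illustrative): for 'const int[3]' this function returns 'int[3]' and
// reports the stripped 'const' through 'quals'.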
5837 const auto *AT = 5838 dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType()); 5839 5840 // If we don't have an array, just use the results in splitType. 5841 if (!AT) { 5842 quals = splitType.Quals; 5843 return QualType(splitType.Ty, 0); 5844 } 5845 5846 // Otherwise, recurse on the array's element type. 5847 QualType elementType = AT->getElementType(); 5848 QualType unqualElementType = getUnqualifiedArrayType(elementType, quals); 5849 5850 // If that didn't change the element type, AT has no qualifiers, so we 5851 // can just use the results in splitType. 5852 if (elementType == unqualElementType) { 5853 assert(quals.empty()); // from the recursive call 5854 quals = splitType.Quals; 5855 return QualType(splitType.Ty, 0); 5856 } 5857 5858 // Otherwise, add in the qualifiers from the outermost type, then 5859 // build the type back up. 5860 quals.addConsistentQualifiers(splitType.Quals); 5861 5862 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { 5863 return getConstantArrayType(unqualElementType, CAT->getSize(), 5864 CAT->getSizeExpr(), CAT->getSizeModifier(), 0); 5865 } 5866 5867 if (const auto *IAT = dyn_cast<IncompleteArrayType>(AT)) { 5868 return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0); 5869 } 5870 5871 if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) { 5872 return getVariableArrayType(unqualElementType, 5873 VAT->getSizeExpr(), 5874 VAT->getSizeModifier(), 5875 VAT->getIndexTypeCVRQualifiers(), 5876 VAT->getBracketsRange()); 5877 } 5878 5879 const auto *DSAT = cast<DependentSizedArrayType>(AT); 5880 return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(), 5881 DSAT->getSizeModifier(), 0, 5882 SourceRange()); 5883 } 5884 5885 /// Attempt to unwrap two types that may both be array types with the same bound 5886 /// (or both be array types of unknown bound) for the purpose of comparing the 5887 /// cv-decomposition of two types per C++ [conv.qual]. 5888 /// 5889 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 5890 /// C++20 [conv.qual], if permitted by the current language mode. 5891 void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2, 5892 bool AllowPiMismatch) { 5893 while (true) { 5894 auto *AT1 = getAsArrayType(T1); 5895 if (!AT1) 5896 return; 5897 5898 auto *AT2 = getAsArrayType(T2); 5899 if (!AT2) 5900 return; 5901 5902 // If we don't have two array types with the same constant bound nor two 5903 // incomplete array types, we've unwrapped everything we can. 5904 // C++20 also permits one type to be a constant array type and the other 5905 // to be an incomplete array type. 5906 // FIXME: Consider also unwrapping array of unknown bound and VLA. 5907 if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) { 5908 auto *CAT2 = dyn_cast<ConstantArrayType>(AT2); 5909 if (!((CAT2 && CAT1->getSize() == CAT2->getSize()) || 5910 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 5911 isa<IncompleteArrayType>(AT2)))) 5912 return; 5913 } else if (isa<IncompleteArrayType>(AT1)) { 5914 if (!(isa<IncompleteArrayType>(AT2) || 5915 (AllowPiMismatch && getLangOpts().CPlusPlus20 && 5916 isa<ConstantArrayType>(AT2)))) 5917 return; 5918 } else { 5919 return; 5920 } 5921 5922 T1 = AT1->getElementType(); 5923 T2 = AT2->getElementType(); 5924 } 5925 } 5926 5927 /// Attempt to unwrap two types that may be similar (C++ [conv.qual]). 
5928 /// 5929 /// If T1 and T2 are both pointer types of the same kind, or both array types 5930 /// with the same bound, unwraps layers from T1 and T2 until a pointer type is 5931 /// unwrapped. Top-level qualifiers on T1 and T2 are ignored. 5932 /// 5933 /// This function will typically be called in a loop that successively 5934 /// "unwraps" pointer and pointer-to-member types to compare them at each 5935 /// level. 5936 /// 5937 /// \param AllowPiMismatch Allow the Pi1 and Pi2 to differ as described in 5938 /// C++20 [conv.qual], if permitted by the current language mode. 5939 /// 5940 /// \return \c true if a pointer type was unwrapped, \c false if we reached a 5941 /// pair of types that can't be unwrapped further. 5942 bool ASTContext::UnwrapSimilarTypes(QualType &T1, QualType &T2, 5943 bool AllowPiMismatch) { 5944 UnwrapSimilarArrayTypes(T1, T2, AllowPiMismatch); 5945 5946 const auto *T1PtrType = T1->getAs<PointerType>(); 5947 const auto *T2PtrType = T2->getAs<PointerType>(); 5948 if (T1PtrType && T2PtrType) { 5949 T1 = T1PtrType->getPointeeType(); 5950 T2 = T2PtrType->getPointeeType(); 5951 return true; 5952 } 5953 5954 const auto *T1MPType = T1->getAs<MemberPointerType>(); 5955 const auto *T2MPType = T2->getAs<MemberPointerType>(); 5956 if (T1MPType && T2MPType && 5957 hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0), 5958 QualType(T2MPType->getClass(), 0))) { 5959 T1 = T1MPType->getPointeeType(); 5960 T2 = T2MPType->getPointeeType(); 5961 return true; 5962 } 5963 5964 if (getLangOpts().ObjC) { 5965 const auto *T1OPType = T1->getAs<ObjCObjectPointerType>(); 5966 const auto *T2OPType = T2->getAs<ObjCObjectPointerType>(); 5967 if (T1OPType && T2OPType) { 5968 T1 = T1OPType->getPointeeType(); 5969 T2 = T2OPType->getPointeeType(); 5970 return true; 5971 } 5972 } 5973 5974 // FIXME: Block pointers, too? 5975 5976 return false; 5977 } 5978 5979 bool ASTContext::hasSimilarType(QualType T1, QualType T2) { 5980 while (true) { 5981 Qualifiers Quals; 5982 T1 = getUnqualifiedArrayType(T1, Quals); 5983 T2 = getUnqualifiedArrayType(T2, Quals); 5984 if (hasSameType(T1, T2)) 5985 return true; 5986 if (!UnwrapSimilarTypes(T1, T2)) 5987 return false; 5988 } 5989 } 5990 5991 bool ASTContext::hasCvrSimilarType(QualType T1, QualType T2) { 5992 while (true) { 5993 Qualifiers Quals1, Quals2; 5994 T1 = getUnqualifiedArrayType(T1, Quals1); 5995 T2 = getUnqualifiedArrayType(T2, Quals2); 5996 5997 Quals1.removeCVRQualifiers(); 5998 Quals2.removeCVRQualifiers(); 5999 if (Quals1 != Quals2) 6000 return false; 6001 6002 if (hasSameType(T1, T2)) 6003 return true; 6004 6005 if (!UnwrapSimilarTypes(T1, T2, /*AllowPiMismatch*/ false)) 6006 return false; 6007 } 6008 } 6009 6010 DeclarationNameInfo 6011 ASTContext::getNameForTemplate(TemplateName Name, 6012 SourceLocation NameLoc) const { 6013 switch (Name.getKind()) { 6014 case TemplateName::QualifiedTemplate: 6015 case TemplateName::Template: 6016 // DNInfo work in progress: CHECKME: what about DNLoc? 6017 return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(), 6018 NameLoc); 6019 6020 case TemplateName::OverloadedTemplate: { 6021 OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate(); 6022 // DNInfo work in progress: CHECKME: what about DNLoc? 
6023 return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc); 6024 } 6025 6026 case TemplateName::AssumedTemplate: { 6027 AssumedTemplateStorage *Storage = Name.getAsAssumedTemplateName(); 6028 return DeclarationNameInfo(Storage->getDeclName(), NameLoc); 6029 } 6030 6031 case TemplateName::DependentTemplate: { 6032 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6033 DeclarationName DName; 6034 if (DTN->isIdentifier()) { 6035 DName = DeclarationNames.getIdentifier(DTN->getIdentifier()); 6036 return DeclarationNameInfo(DName, NameLoc); 6037 } else { 6038 DName = DeclarationNames.getCXXOperatorName(DTN->getOperator()); 6039 // DNInfo work in progress: FIXME: source locations? 6040 DeclarationNameLoc DNLoc = 6041 DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange()); 6042 return DeclarationNameInfo(DName, NameLoc, DNLoc); 6043 } 6044 } 6045 6046 case TemplateName::SubstTemplateTemplateParm: { 6047 SubstTemplateTemplateParmStorage *subst 6048 = Name.getAsSubstTemplateTemplateParm(); 6049 return DeclarationNameInfo(subst->getParameter()->getDeclName(), 6050 NameLoc); 6051 } 6052 6053 case TemplateName::SubstTemplateTemplateParmPack: { 6054 SubstTemplateTemplateParmPackStorage *subst 6055 = Name.getAsSubstTemplateTemplateParmPack(); 6056 return DeclarationNameInfo(subst->getParameterPack()->getDeclName(), 6057 NameLoc); 6058 } 6059 } 6060 6061 llvm_unreachable("bad template name kind!"); 6062 } 6063 6064 TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const { 6065 switch (Name.getKind()) { 6066 case TemplateName::QualifiedTemplate: 6067 case TemplateName::Template: { 6068 TemplateDecl *Template = Name.getAsTemplateDecl(); 6069 if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) 6070 Template = getCanonicalTemplateTemplateParmDecl(TTP); 6071 6072 // The canonical template name is the canonical template declaration. 
6073 return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl())); 6074 } 6075 6076 case TemplateName::OverloadedTemplate: 6077 case TemplateName::AssumedTemplate: 6078 llvm_unreachable("cannot canonicalize unresolved template"); 6079 6080 case TemplateName::DependentTemplate: { 6081 DependentTemplateName *DTN = Name.getAsDependentTemplateName(); 6082 assert(DTN && "Non-dependent template names must refer to template decls."); 6083 return DTN->CanonicalTemplateName; 6084 } 6085 6086 case TemplateName::SubstTemplateTemplateParm: { 6087 SubstTemplateTemplateParmStorage *subst 6088 = Name.getAsSubstTemplateTemplateParm(); 6089 return getCanonicalTemplateName(subst->getReplacement()); 6090 } 6091 6092 case TemplateName::SubstTemplateTemplateParmPack: { 6093 SubstTemplateTemplateParmPackStorage *subst 6094 = Name.getAsSubstTemplateTemplateParmPack(); 6095 TemplateTemplateParmDecl *canonParameter 6096 = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack()); 6097 TemplateArgument canonArgPack 6098 = getCanonicalTemplateArgument(subst->getArgumentPack()); 6099 return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack); 6100 } 6101 } 6102 6103 llvm_unreachable("bad template name!"); 6104 } 6105 6106 bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) { 6107 X = getCanonicalTemplateName(X); 6108 Y = getCanonicalTemplateName(Y); 6109 return X.getAsVoidPointer() == Y.getAsVoidPointer(); 6110 } 6111 6112 TemplateArgument 6113 ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const { 6114 switch (Arg.getKind()) { 6115 case TemplateArgument::Null: 6116 return Arg; 6117 6118 case TemplateArgument::Expression: 6119 return Arg; 6120 6121 case TemplateArgument::Declaration: { 6122 auto *D = cast<ValueDecl>(Arg.getAsDecl()->getCanonicalDecl()); 6123 return TemplateArgument(D, Arg.getParamTypeForDecl()); 6124 } 6125 6126 case TemplateArgument::NullPtr: 6127 return TemplateArgument(getCanonicalType(Arg.getNullPtrType()), 6128 /*isNullPtr*/true); 6129 6130 case TemplateArgument::Template: 6131 return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate())); 6132 6133 case TemplateArgument::TemplateExpansion: 6134 return TemplateArgument(getCanonicalTemplateName( 6135 Arg.getAsTemplateOrTemplatePattern()), 6136 Arg.getNumTemplateExpansions()); 6137 6138 case TemplateArgument::Integral: 6139 return TemplateArgument(Arg, getCanonicalType(Arg.getIntegralType())); 6140 6141 case TemplateArgument::Type: 6142 return TemplateArgument(getCanonicalType(Arg.getAsType())); 6143 6144 case TemplateArgument::Pack: { 6145 if (Arg.pack_size() == 0) 6146 return Arg; 6147 6148 auto *CanonArgs = new (*this) TemplateArgument[Arg.pack_size()]; 6149 unsigned Idx = 0; 6150 for (TemplateArgument::pack_iterator A = Arg.pack_begin(), 6151 AEnd = Arg.pack_end(); 6152 A != AEnd; (void)++A, ++Idx) 6153 CanonArgs[Idx] = getCanonicalTemplateArgument(*A); 6154 6155 return TemplateArgument(llvm::makeArrayRef(CanonArgs, Arg.pack_size())); 6156 } 6157 } 6158 6159 // Silence GCC warning 6160 llvm_unreachable("Unhandled template argument kind"); 6161 } 6162 6163 NestedNameSpecifier * 6164 ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const { 6165 if (!NNS) 6166 return nullptr; 6167 6168 switch (NNS->getKind()) { 6169 case NestedNameSpecifier::Identifier: 6170 // Canonicalize the prefix but keep the identifier the same. 
6171     return NestedNameSpecifier::Create(*this,
6172                      getCanonicalNestedNameSpecifier(NNS->getPrefix()),
6173                                        NNS->getAsIdentifier());
6174 
6175   case NestedNameSpecifier::Namespace:
6176     // A namespace is canonical; build a nested-name-specifier with
6177     // this namespace and no prefix.
6178     return NestedNameSpecifier::Create(*this, nullptr,
6179                                  NNS->getAsNamespace()->getOriginalNamespace());
6180 
6181   case NestedNameSpecifier::NamespaceAlias:
6182     // A namespace is canonical; build a nested-name-specifier with
6183     // this namespace and no prefix.
6184     return NestedNameSpecifier::Create(*this, nullptr,
6185                                     NNS->getAsNamespaceAlias()->getNamespace()
6186                                                    ->getOriginalNamespace());
6187 
6188   // The difference between TypeSpec and TypeSpecWithTemplate is that the
6189   // latter will have the 'template' keyword when printed.
6190   case NestedNameSpecifier::TypeSpec:
6191   case NestedNameSpecifier::TypeSpecWithTemplate: {
6192     const Type *T = getCanonicalType(NNS->getAsType());
6193 
6194     // If we have some kind of dependent-named type (e.g., "typename T::type"),
6195     // break it apart into its prefix and identifier, then reconstitute those
6196     // as the canonical nested-name-specifier. This is required to canonicalize
6197     // a dependent nested-name-specifier involving typedefs of dependent-name
6198     // types, e.g.,
6199     //   typedef typename T::type T1;
6200     //   typedef typename T1::type T2;
6201     if (const auto *DNT = T->getAs<DependentNameType>())
6202       return NestedNameSpecifier::Create(
6203           *this, DNT->getQualifier(),
6204           const_cast<IdentifierInfo *>(DNT->getIdentifier()));
6205     if (const auto *DTST = T->getAs<DependentTemplateSpecializationType>())
6206       return NestedNameSpecifier::Create(*this, DTST->getQualifier(), true,
6207                                          const_cast<Type *>(T));
6208 
6209     // TODO: Set 'Template' parameter to true for other template types.
6210     return NestedNameSpecifier::Create(*this, nullptr, false,
6211                                        const_cast<Type *>(T));
6212   }
6213 
6214   case NestedNameSpecifier::Global:
6215   case NestedNameSpecifier::Super:
6216     // The global specifier and __super specifier are canonical and unique.
6217     return NNS;
6218   }
6219 
6220   llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
6221 }
6222 
6223 const ArrayType *ASTContext::getAsArrayType(QualType T) const {
6224   // Handle the non-qualified case efficiently.
6225   if (!T.hasLocalQualifiers()) {
6226     // Handle the common positive case fast.
6227     if (const auto *AT = dyn_cast<ArrayType>(T))
6228       return AT;
6229   }
6230 
6231   // Handle the common negative case fast.
6232   if (!isa<ArrayType>(T.getCanonicalType()))
6233     return nullptr;
6234 
6235   // Apply any qualifiers from the array type to the element type. This
6236   // implements C99 6.7.3p8: "If the specification of an array type includes
6237   // any type qualifiers, the element type is so qualified, not the array type."
6238 
6239   // If we get here, we either have type qualifiers on the type, or we have
6240   // sugar such as a typedef in the way. If we have type qualifiers on the type
6241   // we must propagate them down into the element type.
6242 
6243   SplitQualType split = T.getSplitDesugaredType();
6244   Qualifiers qs = split.Quals;
6245 
6246   // If we have a simple case, just return now.
6247   const auto *ATy = dyn_cast<ArrayType>(split.Ty);
6248   if (!ATy || qs.empty())
6249     return ATy;
6250 
6251   // Otherwise, we have an array and we have qualifiers on it. Push the
6252   // qualifiers into the array element type and return a new array type.
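  // For example, given 'typedef int A[4];', the type 'const A' is rebuilt
  // here as an array of four 'const int' elements (C99 6.7.3p8).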
6253 QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs); 6254 6255 if (const auto *CAT = dyn_cast<ConstantArrayType>(ATy)) 6256 return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(), 6257 CAT->getSizeExpr(), 6258 CAT->getSizeModifier(), 6259 CAT->getIndexTypeCVRQualifiers())); 6260 if (const auto *IAT = dyn_cast<IncompleteArrayType>(ATy)) 6261 return cast<ArrayType>(getIncompleteArrayType(NewEltTy, 6262 IAT->getSizeModifier(), 6263 IAT->getIndexTypeCVRQualifiers())); 6264 6265 if (const auto *DSAT = dyn_cast<DependentSizedArrayType>(ATy)) 6266 return cast<ArrayType>( 6267 getDependentSizedArrayType(NewEltTy, 6268 DSAT->getSizeExpr(), 6269 DSAT->getSizeModifier(), 6270 DSAT->getIndexTypeCVRQualifiers(), 6271 DSAT->getBracketsRange())); 6272 6273 const auto *VAT = cast<VariableArrayType>(ATy); 6274 return cast<ArrayType>(getVariableArrayType(NewEltTy, 6275 VAT->getSizeExpr(), 6276 VAT->getSizeModifier(), 6277 VAT->getIndexTypeCVRQualifiers(), 6278 VAT->getBracketsRange())); 6279 } 6280 6281 QualType ASTContext::getAdjustedParameterType(QualType T) const { 6282 if (T->isArrayType() || T->isFunctionType()) 6283 return getDecayedType(T); 6284 return T; 6285 } 6286 6287 QualType ASTContext::getSignatureParameterType(QualType T) const { 6288 T = getVariableArrayDecayedType(T); 6289 T = getAdjustedParameterType(T); 6290 return T.getUnqualifiedType(); 6291 } 6292 6293 QualType ASTContext::getExceptionObjectType(QualType T) const { 6294 // C++ [except.throw]p3: 6295 // A throw-expression initializes a temporary object, called the exception 6296 // object, the type of which is determined by removing any top-level 6297 // cv-qualifiers from the static type of the operand of throw and adjusting 6298 // the type from "array of T" or "function returning T" to "pointer to T" 6299 // or "pointer to function returning T", [...] 6300 T = getVariableArrayDecayedType(T); 6301 if (T->isArrayType() || T->isFunctionType()) 6302 T = getDecayedType(T); 6303 return T.getUnqualifiedType(); 6304 } 6305 6306 /// getArrayDecayedType - Return the properly qualified result of decaying the 6307 /// specified array type to a pointer. This operation is non-trivial when 6308 /// handling typedefs etc. The canonical type of "T" must be an array type, 6309 /// this returns a pointer to a properly qualified element of the array. 6310 /// 6311 /// See C99 6.7.5.3p7 and C99 6.3.2.1p3. 6312 QualType ASTContext::getArrayDecayedType(QualType Ty) const { 6313 // Get the element type with 'getAsArrayType' so that we don't lose any 6314 // typedefs in the element type of the array. This also handles propagation 6315 // of type qualifiers from the array type into the element type if present 6316 // (C99 6.7.3p8). 
6317 const ArrayType *PrettyArrayType = getAsArrayType(Ty); 6318 assert(PrettyArrayType && "Not an array type!"); 6319 6320 QualType PtrTy = getPointerType(PrettyArrayType->getElementType()); 6321 6322 // int x[restrict 4] -> int *restrict 6323 QualType Result = getQualifiedType(PtrTy, 6324 PrettyArrayType->getIndexTypeQualifiers()); 6325 6326 // int x[_Nullable] -> int * _Nullable 6327 if (auto Nullability = Ty->getNullability(*this)) { 6328 Result = const_cast<ASTContext *>(this)->getAttributedType( 6329 AttributedType::getNullabilityAttrKind(*Nullability), Result, Result); 6330 } 6331 return Result; 6332 } 6333 6334 QualType ASTContext::getBaseElementType(const ArrayType *array) const { 6335 return getBaseElementType(array->getElementType()); 6336 } 6337 6338 QualType ASTContext::getBaseElementType(QualType type) const { 6339 Qualifiers qs; 6340 while (true) { 6341 SplitQualType split = type.getSplitDesugaredType(); 6342 const ArrayType *array = split.Ty->getAsArrayTypeUnsafe(); 6343 if (!array) break; 6344 6345 type = array->getElementType(); 6346 qs.addConsistentQualifiers(split.Quals); 6347 } 6348 6349 return getQualifiedType(type, qs); 6350 } 6351 6352 /// getConstantArrayElementCount - Returns number of constant array elements. 6353 uint64_t 6354 ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const { 6355 uint64_t ElementCount = 1; 6356 do { 6357 ElementCount *= CA->getSize().getZExtValue(); 6358 CA = dyn_cast_or_null<ConstantArrayType>( 6359 CA->getElementType()->getAsArrayTypeUnsafe()); 6360 } while (CA); 6361 return ElementCount; 6362 } 6363 6364 /// getFloatingRank - Return a relative rank for floating point types. 6365 /// This routine will assert if passed a built-in type that isn't a float. 6366 static FloatingRank getFloatingRank(QualType T) { 6367 if (const auto *CT = T->getAs<ComplexType>()) 6368 return getFloatingRank(CT->getElementType()); 6369 6370 switch (T->castAs<BuiltinType>()->getKind()) { 6371 default: llvm_unreachable("getFloatingRank(): not a floating type"); 6372 case BuiltinType::Float16: return Float16Rank; 6373 case BuiltinType::Half: return HalfRank; 6374 case BuiltinType::Float: return FloatRank; 6375 case BuiltinType::Double: return DoubleRank; 6376 case BuiltinType::LongDouble: return LongDoubleRank; 6377 case BuiltinType::Float128: return Float128Rank; 6378 case BuiltinType::BFloat16: return BFloat16Rank; 6379 case BuiltinType::Ibm128: return Ibm128Rank; 6380 } 6381 } 6382 6383 /// getFloatingTypeOfSizeWithinDomain - Returns a real floating 6384 /// point or a complex type (based on typeDomain/typeSize). 6385 /// 'typeDomain' is a real floating point or complex type. 6386 /// 'typeSize' is a real floating point or complex type. 
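/// For example, a 'float'-sized result within the domain '_Complex double'
/// is '_Complex float'.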
6387 QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size, 6388 QualType Domain) const { 6389 FloatingRank EltRank = getFloatingRank(Size); 6390 if (Domain->isComplexType()) { 6391 switch (EltRank) { 6392 case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported"); 6393 case Float16Rank: 6394 case HalfRank: llvm_unreachable("Complex half is not supported"); 6395 case Ibm128Rank: return getComplexType(Ibm128Ty); 6396 case FloatRank: return getComplexType(FloatTy); 6397 case DoubleRank: return getComplexType(DoubleTy); 6398 case LongDoubleRank: return getComplexType(LongDoubleTy); 6399 case Float128Rank: return getComplexType(Float128Ty); 6400 } 6401 } 6402 6403 assert(Domain->isRealFloatingType() && "Unknown domain!"); 6404 switch (EltRank) { 6405 case Float16Rank: return HalfTy; 6406 case BFloat16Rank: return BFloat16Ty; 6407 case HalfRank: return HalfTy; 6408 case FloatRank: return FloatTy; 6409 case DoubleRank: return DoubleTy; 6410 case LongDoubleRank: return LongDoubleTy; 6411 case Float128Rank: return Float128Ty; 6412 case Ibm128Rank: 6413 return Ibm128Ty; 6414 } 6415 llvm_unreachable("getFloatingRank(): illegal value for rank"); 6416 } 6417 6418 /// getFloatingTypeOrder - Compare the rank of the two specified floating 6419 /// point types, ignoring the domain of the type (i.e. 'double' == 6420 /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If 6421 /// LHS < RHS, return -1. 6422 int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const { 6423 FloatingRank LHSR = getFloatingRank(LHS); 6424 FloatingRank RHSR = getFloatingRank(RHS); 6425 6426 if (LHSR == RHSR) 6427 return 0; 6428 if (LHSR > RHSR) 6429 return 1; 6430 return -1; 6431 } 6432 6433 int ASTContext::getFloatingTypeSemanticOrder(QualType LHS, QualType RHS) const { 6434 if (&getFloatTypeSemantics(LHS) == &getFloatTypeSemantics(RHS)) 6435 return 0; 6436 return getFloatingTypeOrder(LHS, RHS); 6437 } 6438 6439 /// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This 6440 /// routine will assert if passed a built-in type that isn't an integer or enum, 6441 /// or if it is not canonicalized. 6442 unsigned ASTContext::getIntegerRank(const Type *T) const { 6443 assert(T->isCanonicalUnqualified() && "T should be canonicalized"); 6444 6445 // Results in this 'losing' to any type of the same size, but winning if 6446 // larger. 6447 if (const auto *EIT = dyn_cast<ExtIntType>(T)) 6448 return 0 + (EIT->getNumBits() << 3); 6449 6450 switch (cast<BuiltinType>(T)->getKind()) { 6451 default: llvm_unreachable("getIntegerRank(): not a built-in integer"); 6452 case BuiltinType::Bool: 6453 return 1 + (getIntWidth(BoolTy) << 3); 6454 case BuiltinType::Char_S: 6455 case BuiltinType::Char_U: 6456 case BuiltinType::SChar: 6457 case BuiltinType::UChar: 6458 return 2 + (getIntWidth(CharTy) << 3); 6459 case BuiltinType::Short: 6460 case BuiltinType::UShort: 6461 return 3 + (getIntWidth(ShortTy) << 3); 6462 case BuiltinType::Int: 6463 case BuiltinType::UInt: 6464 return 4 + (getIntWidth(IntTy) << 3); 6465 case BuiltinType::Long: 6466 case BuiltinType::ULong: 6467 return 5 + (getIntWidth(LongTy) << 3); 6468 case BuiltinType::LongLong: 6469 case BuiltinType::ULongLong: 6470 return 6 + (getIntWidth(LongLongTy) << 3); 6471 case BuiltinType::Int128: 6472 case BuiltinType::UInt128: 6473 return 7 + (getIntWidth(Int128Ty) << 3); 6474 } 6475 } 6476 6477 /// Whether this is a promotable bitfield reference according 6478 /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions). 
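/// For example, a reference to the 3-bit field in 'struct S { int b : 3; };'
/// promotes to 'int'.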
6479 /// 6480 /// \returns the type this bit-field will promote to, or NULL if no 6481 /// promotion occurs. 6482 QualType ASTContext::isPromotableBitField(Expr *E) const { 6483 if (E->isTypeDependent() || E->isValueDependent()) 6484 return {}; 6485 6486 // C++ [conv.prom]p5: 6487 // If the bit-field has an enumerated type, it is treated as any other 6488 // value of that type for promotion purposes. 6489 if (getLangOpts().CPlusPlus && E->getType()->isEnumeralType()) 6490 return {}; 6491 6492 // FIXME: We should not do this unless E->refersToBitField() is true. This 6493 // matters in C where getSourceBitField() will find bit-fields for various 6494 // cases where the source expression is not a bit-field designator. 6495 6496 FieldDecl *Field = E->getSourceBitField(); // FIXME: conditional bit-fields? 6497 if (!Field) 6498 return {}; 6499 6500 QualType FT = Field->getType(); 6501 6502 uint64_t BitWidth = Field->getBitWidthValue(*this); 6503 uint64_t IntSize = getTypeSize(IntTy); 6504 // C++ [conv.prom]p5: 6505 // A prvalue for an integral bit-field can be converted to a prvalue of type 6506 // int if int can represent all the values of the bit-field; otherwise, it 6507 // can be converted to unsigned int if unsigned int can represent all the 6508 // values of the bit-field. If the bit-field is larger yet, no integral 6509 // promotion applies to it. 6510 // C11 6.3.1.1/2: 6511 // [For a bit-field of type _Bool, int, signed int, or unsigned int:] 6512 // If an int can represent all values of the original type (as restricted by 6513 // the width, for a bit-field), the value is converted to an int; otherwise, 6514 // it is converted to an unsigned int. 6515 // 6516 // FIXME: C does not permit promotion of a 'long : 3' bitfield to int. 6517 // We perform that promotion here to match GCC and C++. 6518 // FIXME: C does not permit promotion of an enum bit-field whose rank is 6519 // greater than that of 'int'. We perform that promotion to match GCC. 6520 if (BitWidth < IntSize) 6521 return IntTy; 6522 6523 if (BitWidth == IntSize) 6524 return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy; 6525 6526 // Bit-fields wider than int are not subject to promotions, and therefore act 6527 // like the base type. GCC has some weird bugs in this area that we 6528 // deliberately do not follow (GCC follows a pre-standard resolution to 6529 // C's DR315 which treats bit-width as being part of the type, and this leaks 6530 // into their semantics in some cases). 6531 return {}; 6532 } 6533 6534 /// getPromotedIntegerType - Returns the type that Promotable will 6535 /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable 6536 /// integer type. 6537 QualType ASTContext::getPromotedIntegerType(QualType Promotable) const { 6538 assert(!Promotable.isNull()); 6539 assert(Promotable->isPromotableIntegerType()); 6540 if (const auto *ET = Promotable->getAs<EnumType>()) 6541 return ET->getDecl()->getPromotionType(); 6542 6543 if (const auto *BT = Promotable->getAs<BuiltinType>()) { 6544 // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t 6545 // (3.9.1) can be converted to a prvalue of the first of the following 6546 // types that can represent all the values of its underlying type: 6547 // int, unsigned int, long int, unsigned long int, long long int, or 6548 // unsigned long long int [...] 6549 // FIXME: Is there some better way to compute this? 
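    // For example, on a target where wchar_t is a signed 32-bit type it
    // promotes to 'int', while an unsigned 32-bit wchar_t promotes to
    // 'unsigned int'.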
6550 if (BT->getKind() == BuiltinType::WChar_S || 6551 BT->getKind() == BuiltinType::WChar_U || 6552 BT->getKind() == BuiltinType::Char8 || 6553 BT->getKind() == BuiltinType::Char16 || 6554 BT->getKind() == BuiltinType::Char32) { 6555 bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S; 6556 uint64_t FromSize = getTypeSize(BT); 6557 QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy, 6558 LongLongTy, UnsignedLongLongTy }; 6559 for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) { 6560 uint64_t ToSize = getTypeSize(PromoteTypes[Idx]); 6561 if (FromSize < ToSize || 6562 (FromSize == ToSize && 6563 FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) 6564 return PromoteTypes[Idx]; 6565 } 6566 llvm_unreachable("char type should fit into long long"); 6567 } 6568 } 6569 6570 // At this point, we should have a signed or unsigned integer type. 6571 if (Promotable->isSignedIntegerType()) 6572 return IntTy; 6573 uint64_t PromotableSize = getIntWidth(Promotable); 6574 uint64_t IntSize = getIntWidth(IntTy); 6575 assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize); 6576 return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy; 6577 } 6578 6579 /// Recurses in pointer/array types until it finds an objc retainable 6580 /// type and returns its ownership. 6581 Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const { 6582 while (!T.isNull()) { 6583 if (T.getObjCLifetime() != Qualifiers::OCL_None) 6584 return T.getObjCLifetime(); 6585 if (T->isArrayType()) 6586 T = getBaseElementType(T); 6587 else if (const auto *PT = T->getAs<PointerType>()) 6588 T = PT->getPointeeType(); 6589 else if (const auto *RT = T->getAs<ReferenceType>()) 6590 T = RT->getPointeeType(); 6591 else 6592 break; 6593 } 6594 6595 return Qualifiers::OCL_None; 6596 } 6597 6598 static const Type *getIntegerTypeForEnum(const EnumType *ET) { 6599 // Incomplete enum types are not treated as integer types. 6600 // FIXME: In C++, enum types are never integer types. 6601 if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped()) 6602 return ET->getDecl()->getIntegerType().getTypePtr(); 6603 return nullptr; 6604 } 6605 6606 /// getIntegerTypeOrder - Returns the highest ranked integer type: 6607 /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If 6608 /// LHS < RHS, return -1. 6609 int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const { 6610 const Type *LHSC = getCanonicalType(LHS).getTypePtr(); 6611 const Type *RHSC = getCanonicalType(RHS).getTypePtr(); 6612 6613 // Unwrap enums to their underlying type. 6614 if (const auto *ET = dyn_cast<EnumType>(LHSC)) 6615 LHSC = getIntegerTypeForEnum(ET); 6616 if (const auto *ET = dyn_cast<EnumType>(RHSC)) 6617 RHSC = getIntegerTypeForEnum(ET); 6618 6619 if (LHSC == RHSC) return 0; 6620 6621 bool LHSUnsigned = LHSC->isUnsignedIntegerType(); 6622 bool RHSUnsigned = RHSC->isUnsignedIntegerType(); 6623 6624 unsigned LHSRank = getIntegerRank(LHSC); 6625 unsigned RHSRank = getIntegerRank(RHSC); 6626 6627 if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned. 6628 if (LHSRank == RHSRank) return 0; 6629 return LHSRank > RHSRank ? 1 : -1; 6630 } 6631 6632 // Otherwise, the LHS is signed and the RHS is unsigned or visa versa. 6633 if (LHSUnsigned) { 6634 // If the unsigned [LHS] type is larger, return it. 6635 if (LHSRank >= RHSRank) 6636 return 1; 6637 6638 // If the signed type can represent all values of the unsigned type, it 6639 // wins. 
Because we are dealing with 2's complement and types that are 6640 // powers of two larger than each other, this is always safe. 6641 return -1; 6642 } 6643 6644 // If the unsigned [RHS] type is larger, return it. 6645 if (RHSRank >= LHSRank) 6646 return -1; 6647 6648 // If the signed type can represent all values of the unsigned type, it 6649 // wins. Because we are dealing with 2's complement and types that are 6650 // powers of two larger than each other, this is always safe. 6651 return 1; 6652 } 6653 6654 TypedefDecl *ASTContext::getCFConstantStringDecl() const { 6655 if (CFConstantStringTypeDecl) 6656 return CFConstantStringTypeDecl; 6657 6658 assert(!CFConstantStringTagDecl && 6659 "tag and typedef should be initialized together"); 6660 CFConstantStringTagDecl = buildImplicitRecord("__NSConstantString_tag"); 6661 CFConstantStringTagDecl->startDefinition(); 6662 6663 struct { 6664 QualType Type; 6665 const char *Name; 6666 } Fields[5]; 6667 unsigned Count = 0; 6668 6669 /// Objective-C ABI 6670 /// 6671 /// typedef struct __NSConstantString_tag { 6672 /// const int *isa; 6673 /// int flags; 6674 /// const char *str; 6675 /// long length; 6676 /// } __NSConstantString; 6677 /// 6678 /// Swift ABI (4.1, 4.2) 6679 /// 6680 /// typedef struct __NSConstantString_tag { 6681 /// uintptr_t _cfisa; 6682 /// uintptr_t _swift_rc; 6683 /// _Atomic(uint64_t) _cfinfoa; 6684 /// const char *_ptr; 6685 /// uint32_t _length; 6686 /// } __NSConstantString; 6687 /// 6688 /// Swift ABI (5.0) 6689 /// 6690 /// typedef struct __NSConstantString_tag { 6691 /// uintptr_t _cfisa; 6692 /// uintptr_t _swift_rc; 6693 /// _Atomic(uint64_t) _cfinfoa; 6694 /// const char *_ptr; 6695 /// uintptr_t _length; 6696 /// } __NSConstantString; 6697 6698 const auto CFRuntime = getLangOpts().CFRuntime; 6699 if (static_cast<unsigned>(CFRuntime) < 6700 static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift)) { 6701 Fields[Count++] = { getPointerType(IntTy.withConst()), "isa" }; 6702 Fields[Count++] = { IntTy, "flags" }; 6703 Fields[Count++] = { getPointerType(CharTy.withConst()), "str" }; 6704 Fields[Count++] = { LongTy, "length" }; 6705 } else { 6706 Fields[Count++] = { getUIntPtrType(), "_cfisa" }; 6707 Fields[Count++] = { getUIntPtrType(), "_swift_rc" }; 6708 Fields[Count++] = { getFromTargetType(Target->getUInt64Type()), "_swift_rc" }; 6709 Fields[Count++] = { getPointerType(CharTy.withConst()), "_ptr" }; 6710 if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 || 6711 CFRuntime == LangOptions::CoreFoundationABI::Swift4_2) 6712 Fields[Count++] = { IntTy, "_ptr" }; 6713 else 6714 Fields[Count++] = { getUIntPtrType(), "_ptr" }; 6715 } 6716 6717 // Create fields 6718 for (unsigned i = 0; i < Count; ++i) { 6719 FieldDecl *Field = 6720 FieldDecl::Create(*this, CFConstantStringTagDecl, SourceLocation(), 6721 SourceLocation(), &Idents.get(Fields[i].Name), 6722 Fields[i].Type, /*TInfo=*/nullptr, 6723 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 6724 Field->setAccess(AS_public); 6725 CFConstantStringTagDecl->addDecl(Field); 6726 } 6727 6728 CFConstantStringTagDecl->completeDefinition(); 6729 // This type is designed to be compatible with NSConstantString, but cannot 6730 // use the same name, since NSConstantString is an interface. 
6731 auto tagType = getTagDeclType(CFConstantStringTagDecl); 6732 CFConstantStringTypeDecl = 6733 buildImplicitTypedef(tagType, "__NSConstantString"); 6734 6735 return CFConstantStringTypeDecl; 6736 } 6737 6738 RecordDecl *ASTContext::getCFConstantStringTagDecl() const { 6739 if (!CFConstantStringTagDecl) 6740 getCFConstantStringDecl(); // Build the tag and the typedef. 6741 return CFConstantStringTagDecl; 6742 } 6743 6744 // getCFConstantStringType - Return the type used for constant CFStrings. 6745 QualType ASTContext::getCFConstantStringType() const { 6746 return getTypedefType(getCFConstantStringDecl()); 6747 } 6748 6749 QualType ASTContext::getObjCSuperType() const { 6750 if (ObjCSuperType.isNull()) { 6751 RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super"); 6752 getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl); 6753 ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl); 6754 } 6755 return ObjCSuperType; 6756 } 6757 6758 void ASTContext::setCFConstantStringType(QualType T) { 6759 const auto *TD = T->castAs<TypedefType>(); 6760 CFConstantStringTypeDecl = cast<TypedefDecl>(TD->getDecl()); 6761 const auto *TagType = 6762 CFConstantStringTypeDecl->getUnderlyingType()->castAs<RecordType>(); 6763 CFConstantStringTagDecl = TagType->getDecl(); 6764 } 6765 6766 QualType ASTContext::getBlockDescriptorType() const { 6767 if (BlockDescriptorType) 6768 return getTagDeclType(BlockDescriptorType); 6769 6770 RecordDecl *RD; 6771 // FIXME: Needs the FlagAppleBlock bit. 6772 RD = buildImplicitRecord("__block_descriptor"); 6773 RD->startDefinition(); 6774 6775 QualType FieldTypes[] = { 6776 UnsignedLongTy, 6777 UnsignedLongTy, 6778 }; 6779 6780 static const char *const FieldNames[] = { 6781 "reserved", 6782 "Size" 6783 }; 6784 6785 for (size_t i = 0; i < 2; ++i) { 6786 FieldDecl *Field = FieldDecl::Create( 6787 *this, RD, SourceLocation(), SourceLocation(), 6788 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 6789 /*BitWidth=*/nullptr, /*Mutable=*/false, ICIS_NoInit); 6790 Field->setAccess(AS_public); 6791 RD->addDecl(Field); 6792 } 6793 6794 RD->completeDefinition(); 6795 6796 BlockDescriptorType = RD; 6797 6798 return getTagDeclType(BlockDescriptorType); 6799 } 6800 6801 QualType ASTContext::getBlockDescriptorExtendedType() const { 6802 if (BlockDescriptorExtendedType) 6803 return getTagDeclType(BlockDescriptorExtendedType); 6804 6805 RecordDecl *RD; 6806 // FIXME: Needs the FlagAppleBlock bit. 
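  // The record built below is roughly equivalent to:
  //
  //   struct __block_descriptor_withcopydispose {
  //     unsigned long reserved;
  //     unsigned long Size;
  //     void **CopyFuncPtr;
  //     void **DestroyFuncPtr;
  //   };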
6807 RD = buildImplicitRecord("__block_descriptor_withcopydispose"); 6808 RD->startDefinition(); 6809 6810 QualType FieldTypes[] = { 6811 UnsignedLongTy, 6812 UnsignedLongTy, 6813 getPointerType(VoidPtrTy), 6814 getPointerType(VoidPtrTy) 6815 }; 6816 6817 static const char *const FieldNames[] = { 6818 "reserved", 6819 "Size", 6820 "CopyFuncPtr", 6821 "DestroyFuncPtr" 6822 }; 6823 6824 for (size_t i = 0; i < 4; ++i) { 6825 FieldDecl *Field = FieldDecl::Create( 6826 *this, RD, SourceLocation(), SourceLocation(), 6827 &Idents.get(FieldNames[i]), FieldTypes[i], /*TInfo=*/nullptr, 6828 /*BitWidth=*/nullptr, 6829 /*Mutable=*/false, ICIS_NoInit); 6830 Field->setAccess(AS_public); 6831 RD->addDecl(Field); 6832 } 6833 6834 RD->completeDefinition(); 6835 6836 BlockDescriptorExtendedType = RD; 6837 return getTagDeclType(BlockDescriptorExtendedType); 6838 } 6839 6840 OpenCLTypeKind ASTContext::getOpenCLTypeKind(const Type *T) const { 6841 const auto *BT = dyn_cast<BuiltinType>(T); 6842 6843 if (!BT) { 6844 if (isa<PipeType>(T)) 6845 return OCLTK_Pipe; 6846 6847 return OCLTK_Default; 6848 } 6849 6850 switch (BT->getKind()) { 6851 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 6852 case BuiltinType::Id: \ 6853 return OCLTK_Image; 6854 #include "clang/Basic/OpenCLImageTypes.def" 6855 6856 case BuiltinType::OCLClkEvent: 6857 return OCLTK_ClkEvent; 6858 6859 case BuiltinType::OCLEvent: 6860 return OCLTK_Event; 6861 6862 case BuiltinType::OCLQueue: 6863 return OCLTK_Queue; 6864 6865 case BuiltinType::OCLReserveID: 6866 return OCLTK_ReserveID; 6867 6868 case BuiltinType::OCLSampler: 6869 return OCLTK_Sampler; 6870 6871 default: 6872 return OCLTK_Default; 6873 } 6874 } 6875 6876 LangAS ASTContext::getOpenCLTypeAddrSpace(const Type *T) const { 6877 return Target->getOpenCLTypeAddrSpace(getOpenCLTypeKind(T)); 6878 } 6879 6880 /// BlockRequiresCopying - Returns true if byref variable "D" of type "Ty" 6881 /// requires copy/dispose. Note that this must match the logic 6882 /// in buildByrefHelpers. 6883 bool ASTContext::BlockRequiresCopying(QualType Ty, 6884 const VarDecl *D) { 6885 if (const CXXRecordDecl *record = Ty->getAsCXXRecordDecl()) { 6886 const Expr *copyExpr = getBlockVarCopyInit(D).getCopyExpr(); 6887 if (!copyExpr && record->hasTrivialDestructor()) return false; 6888 6889 return true; 6890 } 6891 6892 // The block needs copy/destroy helpers if Ty is non-trivial to destructively 6893 // move or destroy. 6894 if (Ty.isNonTrivialToPrimitiveDestructiveMove() || Ty.isDestructedType()) 6895 return true; 6896 6897 if (!Ty->isObjCRetainableType()) return false; 6898 6899 Qualifiers qs = Ty.getQualifiers(); 6900 6901 // If we have lifetime, that dominates. 6902 if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { 6903 switch (lifetime) { 6904 case Qualifiers::OCL_None: llvm_unreachable("impossible"); 6905 6906 // These are just bits as far as the runtime is concerned. 6907 case Qualifiers::OCL_ExplicitNone: 6908 case Qualifiers::OCL_Autoreleasing: 6909 return false; 6910 6911 // These cases should have been taken care of when checking the type's 6912 // non-triviality. 
6913 case Qualifiers::OCL_Weak: 6914 case Qualifiers::OCL_Strong: 6915 llvm_unreachable("impossible"); 6916 } 6917 llvm_unreachable("fell out of lifetime switch!"); 6918 } 6919 return (Ty->isBlockPointerType() || isObjCNSObjectType(Ty) || 6920 Ty->isObjCObjectPointerType()); 6921 } 6922 6923 bool ASTContext::getByrefLifetime(QualType Ty, 6924 Qualifiers::ObjCLifetime &LifeTime, 6925 bool &HasByrefExtendedLayout) const { 6926 if (!getLangOpts().ObjC || 6927 getLangOpts().getGC() != LangOptions::NonGC) 6928 return false; 6929 6930 HasByrefExtendedLayout = false; 6931 if (Ty->isRecordType()) { 6932 HasByrefExtendedLayout = true; 6933 LifeTime = Qualifiers::OCL_None; 6934 } else if ((LifeTime = Ty.getObjCLifetime())) { 6935 // Honor the ARC qualifiers. 6936 } else if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType()) { 6937 // The MRR rule. 6938 LifeTime = Qualifiers::OCL_ExplicitNone; 6939 } else { 6940 LifeTime = Qualifiers::OCL_None; 6941 } 6942 return true; 6943 } 6944 6945 CanQualType ASTContext::getNSUIntegerType() const { 6946 assert(Target && "Expected target to be initialized"); 6947 const llvm::Triple &T = Target->getTriple(); 6948 // Windows is LLP64 rather than LP64 6949 if (T.isOSWindows() && T.isArch64Bit()) 6950 return UnsignedLongLongTy; 6951 return UnsignedLongTy; 6952 } 6953 6954 CanQualType ASTContext::getNSIntegerType() const { 6955 assert(Target && "Expected target to be initialized"); 6956 const llvm::Triple &T = Target->getTriple(); 6957 // Windows is LLP64 rather than LP64 6958 if (T.isOSWindows() && T.isArch64Bit()) 6959 return LongLongTy; 6960 return LongTy; 6961 } 6962 6963 TypedefDecl *ASTContext::getObjCInstanceTypeDecl() { 6964 if (!ObjCInstanceTypeDecl) 6965 ObjCInstanceTypeDecl = 6966 buildImplicitTypedef(getObjCIdType(), "instancetype"); 6967 return ObjCInstanceTypeDecl; 6968 } 6969 6970 // This returns true if a type has been typedefed to BOOL: 6971 // typedef <type> BOOL; 6972 static bool isTypeTypedefedAsBOOL(QualType T) { 6973 if (const auto *TT = dyn_cast<TypedefType>(T)) 6974 if (IdentifierInfo *II = TT->getDecl()->getIdentifier()) 6975 return II->isStr("BOOL"); 6976 6977 return false; 6978 } 6979 6980 /// getObjCEncodingTypeSize returns size of type for objective-c encoding 6981 /// purpose. 6982 CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const { 6983 if (!type->isIncompleteArrayType() && type->isIncompleteType()) 6984 return CharUnits::Zero(); 6985 6986 CharUnits sz = getTypeSizeInChars(type); 6987 6988 // Make all integer and enum types at least as large as an int 6989 if (sz.isPositive() && type->isIntegralOrEnumerationType()) 6990 sz = std::max(sz, getTypeSizeInChars(IntTy)); 6991 // Treat arrays as pointers, since that's how they're passed in. 6992 else if (type->isArrayType()) 6993 sz = getTypeSizeInChars(VoidPtrTy); 6994 return sz; 6995 } 6996 6997 bool ASTContext::isMSStaticDataMemberInlineDefinition(const VarDecl *VD) const { 6998 return getTargetInfo().getCXXABI().isMicrosoft() && 6999 VD->isStaticDataMember() && 7000 VD->getType()->isIntegralOrEnumerationType() && 7001 !VD->getFirstDecl()->isOutOfLine() && VD->getFirstDecl()->hasInit(); 7002 } 7003 7004 ASTContext::InlineVariableDefinitionKind 7005 ASTContext::getInlineVariableDefinitionKind(const VarDecl *VD) const { 7006 if (!VD->isInline()) 7007 return InlineVariableDefinitionKind::None; 7008 7009 // In almost all cases, it's a weak definition. 
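  // For example, a namespace-scope 'inline int x = 0;' or an inline static
  // data member is normally discardable; the Strong case below only covers
  // the redundant out-of-line declaration of a constexpr static data member.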
7010 auto *First = VD->getFirstDecl(); 7011 if (First->isInlineSpecified() || !First->isStaticDataMember()) 7012 return InlineVariableDefinitionKind::Weak; 7013 7014 // If there's a file-context declaration in this translation unit, it's a 7015 // non-discardable definition. 7016 for (auto *D : VD->redecls()) 7017 if (D->getLexicalDeclContext()->isFileContext() && 7018 !D->isInlineSpecified() && (D->isConstexpr() || First->isConstexpr())) 7019 return InlineVariableDefinitionKind::Strong; 7020 7021 // If we've not seen one yet, we don't know. 7022 return InlineVariableDefinitionKind::WeakUnknown; 7023 } 7024 7025 static std::string charUnitsToString(const CharUnits &CU) { 7026 return llvm::itostr(CU.getQuantity()); 7027 } 7028 7029 /// getObjCEncodingForBlock - Return the encoded type for this block 7030 /// declaration. 7031 std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const { 7032 std::string S; 7033 7034 const BlockDecl *Decl = Expr->getBlockDecl(); 7035 QualType BlockTy = 7036 Expr->getType()->castAs<BlockPointerType>()->getPointeeType(); 7037 QualType BlockReturnTy = BlockTy->castAs<FunctionType>()->getReturnType(); 7038 // Encode result type. 7039 if (getLangOpts().EncodeExtendedBlockSig) 7040 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, BlockReturnTy, S, 7041 true /*Extended*/); 7042 else 7043 getObjCEncodingForType(BlockReturnTy, S); 7044 // Compute size of all parameters. 7045 // Start with computing size of a pointer in number of bytes. 7046 // FIXME: There might(should) be a better way of doing this computation! 7047 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7048 CharUnits ParmOffset = PtrSize; 7049 for (auto PI : Decl->parameters()) { 7050 QualType PType = PI->getType(); 7051 CharUnits sz = getObjCEncodingTypeSize(PType); 7052 if (sz.isZero()) 7053 continue; 7054 assert(sz.isPositive() && "BlockExpr - Incomplete param type"); 7055 ParmOffset += sz; 7056 } 7057 // Size of the argument frame 7058 S += charUnitsToString(ParmOffset); 7059 // Block pointer and offset. 7060 S += "@?0"; 7061 7062 // Argument types. 7063 ParmOffset = PtrSize; 7064 for (auto PVDecl : Decl->parameters()) { 7065 QualType PType = PVDecl->getOriginalType(); 7066 if (const auto *AT = 7067 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7068 // Use array's original type only if it has known number of 7069 // elements. 7070 if (!isa<ConstantArrayType>(AT)) 7071 PType = PVDecl->getType(); 7072 } else if (PType->isFunctionType()) 7073 PType = PVDecl->getType(); 7074 if (getLangOpts().EncodeExtendedBlockSig) 7075 getObjCEncodingForMethodParameter(Decl::OBJC_TQ_None, PType, 7076 S, true /*Extended*/); 7077 else 7078 getObjCEncodingForType(PType, S); 7079 S += charUnitsToString(ParmOffset); 7080 ParmOffset += getObjCEncodingTypeSize(PType); 7081 } 7082 7083 return S; 7084 } 7085 7086 std::string 7087 ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const { 7088 std::string S; 7089 // Encode result type. 7090 getObjCEncodingForType(Decl->getReturnType(), S); 7091 CharUnits ParmOffset; 7092 // Compute size of all parameters. 7093 for (auto PI : Decl->parameters()) { 7094 QualType PType = PI->getType(); 7095 CharUnits sz = getObjCEncodingTypeSize(PType); 7096 if (sz.isZero()) 7097 continue; 7098 7099 assert(sz.isPositive() && 7100 "getObjCEncodingForFunctionDecl - Incomplete param type"); 7101 ParmOffset += sz; 7102 } 7103 S += charUnitsToString(ParmOffset); 7104 ParmOffset = CharUnits::Zero(); 7105 7106 // Argument types. 
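  // For example, on a typical 64-bit target 'void f(int a, char *b)' encodes
  // as "v12i0*4": 'v' return type, a 12-byte argument frame, then each
  // parameter's type followed by its byte offset, which the loop below
  // appends.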
7107 for (auto PVDecl : Decl->parameters()) { 7108 QualType PType = PVDecl->getOriginalType(); 7109 if (const auto *AT = 7110 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7111 // Use array's original type only if it has known number of 7112 // elements. 7113 if (!isa<ConstantArrayType>(AT)) 7114 PType = PVDecl->getType(); 7115 } else if (PType->isFunctionType()) 7116 PType = PVDecl->getType(); 7117 getObjCEncodingForType(PType, S); 7118 S += charUnitsToString(ParmOffset); 7119 ParmOffset += getObjCEncodingTypeSize(PType); 7120 } 7121 7122 return S; 7123 } 7124 7125 /// getObjCEncodingForMethodParameter - Return the encoded type for a single 7126 /// method parameter or return type. If Extended, include class names and 7127 /// block object types. 7128 void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT, 7129 QualType T, std::string& S, 7130 bool Extended) const { 7131 // Encode type qualifier, 'in', 'inout', etc. for the parameter. 7132 getObjCEncodingForTypeQualifier(QT, S); 7133 // Encode parameter type. 7134 ObjCEncOptions Options = ObjCEncOptions() 7135 .setExpandPointedToStructures() 7136 .setExpandStructures() 7137 .setIsOutermostType(); 7138 if (Extended) 7139 Options.setEncodeBlockParameters().setEncodeClassNames(); 7140 getObjCEncodingForTypeImpl(T, S, Options, /*Field=*/nullptr); 7141 } 7142 7143 /// getObjCEncodingForMethodDecl - Return the encoded type for this method 7144 /// declaration. 7145 std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, 7146 bool Extended) const { 7147 // FIXME: This is not very efficient. 7148 // Encode return type. 7149 std::string S; 7150 getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(), 7151 Decl->getReturnType(), S, Extended); 7152 // Compute size of all parameters. 7153 // Start with computing size of a pointer in number of bytes. 7154 // FIXME: There might(should) be a better way of doing this computation! 7155 CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy); 7156 // The first two arguments (self and _cmd) are pointers; account for 7157 // their size. 7158 CharUnits ParmOffset = 2 * PtrSize; 7159 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7160 E = Decl->sel_param_end(); PI != E; ++PI) { 7161 QualType PType = (*PI)->getType(); 7162 CharUnits sz = getObjCEncodingTypeSize(PType); 7163 if (sz.isZero()) 7164 continue; 7165 7166 assert(sz.isPositive() && 7167 "getObjCEncodingForMethodDecl - Incomplete param type"); 7168 ParmOffset += sz; 7169 } 7170 S += charUnitsToString(ParmOffset); 7171 S += "@0:"; 7172 S += charUnitsToString(PtrSize); 7173 7174 // Argument types. 7175 ParmOffset = 2 * PtrSize; 7176 for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(), 7177 E = Decl->sel_param_end(); PI != E; ++PI) { 7178 const ParmVarDecl *PVDecl = *PI; 7179 QualType PType = PVDecl->getOriginalType(); 7180 if (const auto *AT = 7181 dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) { 7182 // Use array's original type only if it has known number of 7183 // elements. 
7184 if (!isa<ConstantArrayType>(AT)) 7185 PType = PVDecl->getType(); 7186 } else if (PType->isFunctionType()) 7187 PType = PVDecl->getType(); 7188 getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(), 7189 PType, S, Extended); 7190 S += charUnitsToString(ParmOffset); 7191 ParmOffset += getObjCEncodingTypeSize(PType); 7192 } 7193 7194 return S; 7195 } 7196 7197 ObjCPropertyImplDecl * 7198 ASTContext::getObjCPropertyImplDeclForPropertyDecl( 7199 const ObjCPropertyDecl *PD, 7200 const Decl *Container) const { 7201 if (!Container) 7202 return nullptr; 7203 if (const auto *CID = dyn_cast<ObjCCategoryImplDecl>(Container)) { 7204 for (auto *PID : CID->property_impls()) 7205 if (PID->getPropertyDecl() == PD) 7206 return PID; 7207 } else { 7208 const auto *OID = cast<ObjCImplementationDecl>(Container); 7209 for (auto *PID : OID->property_impls()) 7210 if (PID->getPropertyDecl() == PD) 7211 return PID; 7212 } 7213 return nullptr; 7214 } 7215 7216 /// getObjCEncodingForPropertyDecl - Return the encoded type for this 7217 /// property declaration. If non-NULL, Container must be either an 7218 /// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be 7219 /// NULL when getting encodings for protocol properties. 7220 /// Property attributes are stored as a comma-delimited C string. The simple 7221 /// attributes readonly and bycopy are encoded as single characters. The 7222 /// parametrized attributes, getter=name, setter=name, and ivar=name, are 7223 /// encoded as single characters, followed by an identifier. Property types 7224 /// are also encoded as a parametrized attribute. The characters used to encode 7225 /// these attributes are defined by the following enumeration: 7226 /// @code 7227 /// enum PropertyAttributes { 7228 /// kPropertyReadOnly = 'R', // property is read-only. 7229 /// kPropertyBycopy = 'C', // property is a copy of the value last assigned 7230 /// kPropertyByref = '&', // property is a reference to the value last assigned 7231 /// kPropertyDynamic = 'D', // property is dynamic 7232 /// kPropertyGetter = 'G', // followed by getter selector name 7233 /// kPropertySetter = 'S', // followed by setter selector name 7234 /// kPropertyInstanceVariable = 'V' // followed by instance variable name 7235 /// kPropertyType = 'T' // followed by old-style type encoding. 7236 /// kPropertyWeak = 'W' // 'weak' property 7237 /// kPropertyStrong = 'P' // property GC'able 7238 /// kPropertyNonAtomic = 'N' // property non-atomic 7239 /// }; 7240 /// @endcode 7241 std::string 7242 ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD, 7243 const Decl *Container) const { 7244 // Collect information from the property implementation decl(s). 7245 bool Dynamic = false; 7246 ObjCPropertyImplDecl *SynthesizePID = nullptr; 7247 7248 if (ObjCPropertyImplDecl *PropertyImpDecl = 7249 getObjCPropertyImplDeclForPropertyDecl(PD, Container)) { 7250 if (PropertyImpDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) 7251 Dynamic = true; 7252 else 7253 SynthesizePID = PropertyImpDecl; 7254 } 7255 7256 // FIXME: This is not very efficient. 7257 std::string S = "T"; 7258 7259 // Encode result type. 7260 // GCC has some special rules regarding encoding of properties which 7261 // closely resembles encoding of ivars. 
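  // For example, a synthesized '@property (nonatomic, copy) NSString *name;'
  // backed by the ivar '_name' typically yields T@"NSString",C,N,V_name.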
7262 getObjCEncodingForPropertyType(PD->getType(), S); 7263 7264 if (PD->isReadOnly()) { 7265 S += ",R"; 7266 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_copy) 7267 S += ",C"; 7268 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_retain) 7269 S += ",&"; 7270 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak) 7271 S += ",W"; 7272 } else { 7273 switch (PD->getSetterKind()) { 7274 case ObjCPropertyDecl::Assign: break; 7275 case ObjCPropertyDecl::Copy: S += ",C"; break; 7276 case ObjCPropertyDecl::Retain: S += ",&"; break; 7277 case ObjCPropertyDecl::Weak: S += ",W"; break; 7278 } 7279 } 7280 7281 // It really isn't clear at all what this means, since properties 7282 // are "dynamic by default". 7283 if (Dynamic) 7284 S += ",D"; 7285 7286 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_nonatomic) 7287 S += ",N"; 7288 7289 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_getter) { 7290 S += ",G"; 7291 S += PD->getGetterName().getAsString(); 7292 } 7293 7294 if (PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_setter) { 7295 S += ",S"; 7296 S += PD->getSetterName().getAsString(); 7297 } 7298 7299 if (SynthesizePID) { 7300 const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl(); 7301 S += ",V"; 7302 S += OID->getNameAsString(); 7303 } 7304 7305 // FIXME: OBJCGC: weak & strong 7306 return S; 7307 } 7308 7309 /// getLegacyIntegralTypeEncoding - 7310 /// Another legacy compatibility encoding: 32-bit longs are encoded as 7311 /// 'l' or 'L' , but not always. For typedefs, we need to use 7312 /// 'i' or 'I' instead if encoding a struct field, or a pointer! 7313 void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const { 7314 if (isa<TypedefType>(PointeeTy.getTypePtr())) { 7315 if (const auto *BT = PointeeTy->getAs<BuiltinType>()) { 7316 if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32) 7317 PointeeTy = UnsignedIntTy; 7318 else 7319 if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32) 7320 PointeeTy = IntTy; 7321 } 7322 } 7323 } 7324 7325 void ASTContext::getObjCEncodingForType(QualType T, std::string& S, 7326 const FieldDecl *Field, 7327 QualType *NotEncodedT) const { 7328 // We follow the behavior of gcc, expanding structures which are 7329 // directly pointed to, and expanding embedded structures. Note that 7330 // these rules are sufficient to prevent recursive encoding of the 7331 // same type. 7332 getObjCEncodingForTypeImpl(T, S, 7333 ObjCEncOptions() 7334 .setExpandPointedToStructures() 7335 .setExpandStructures() 7336 .setIsOutermostType(), 7337 Field, NotEncodedT); 7338 } 7339 7340 void ASTContext::getObjCEncodingForPropertyType(QualType T, 7341 std::string& S) const { 7342 // Encode result type. 7343 // GCC has some special rules regarding encoding of properties which 7344 // closely resembles encoding of ivars. 
7345 getObjCEncodingForTypeImpl(T, S, 7346 ObjCEncOptions() 7347 .setExpandPointedToStructures() 7348 .setExpandStructures() 7349 .setIsOutermostType() 7350 .setEncodingProperty(), 7351 /*Field=*/nullptr); 7352 } 7353 7354 static char getObjCEncodingForPrimitiveType(const ASTContext *C, 7355 const BuiltinType *BT) { 7356 BuiltinType::Kind kind = BT->getKind(); 7357 switch (kind) { 7358 case BuiltinType::Void: return 'v'; 7359 case BuiltinType::Bool: return 'B'; 7360 case BuiltinType::Char8: 7361 case BuiltinType::Char_U: 7362 case BuiltinType::UChar: return 'C'; 7363 case BuiltinType::Char16: 7364 case BuiltinType::UShort: return 'S'; 7365 case BuiltinType::Char32: 7366 case BuiltinType::UInt: return 'I'; 7367 case BuiltinType::ULong: 7368 return C->getTargetInfo().getLongWidth() == 32 ? 'L' : 'Q'; 7369 case BuiltinType::UInt128: return 'T'; 7370 case BuiltinType::ULongLong: return 'Q'; 7371 case BuiltinType::Char_S: 7372 case BuiltinType::SChar: return 'c'; 7373 case BuiltinType::Short: return 's'; 7374 case BuiltinType::WChar_S: 7375 case BuiltinType::WChar_U: 7376 case BuiltinType::Int: return 'i'; 7377 case BuiltinType::Long: 7378 return C->getTargetInfo().getLongWidth() == 32 ? 'l' : 'q'; 7379 case BuiltinType::LongLong: return 'q'; 7380 case BuiltinType::Int128: return 't'; 7381 case BuiltinType::Float: return 'f'; 7382 case BuiltinType::Double: return 'd'; 7383 case BuiltinType::LongDouble: return 'D'; 7384 case BuiltinType::NullPtr: return '*'; // like char* 7385 7386 case BuiltinType::BFloat16: 7387 case BuiltinType::Float16: 7388 case BuiltinType::Float128: 7389 case BuiltinType::Ibm128: 7390 case BuiltinType::Half: 7391 case BuiltinType::ShortAccum: 7392 case BuiltinType::Accum: 7393 case BuiltinType::LongAccum: 7394 case BuiltinType::UShortAccum: 7395 case BuiltinType::UAccum: 7396 case BuiltinType::ULongAccum: 7397 case BuiltinType::ShortFract: 7398 case BuiltinType::Fract: 7399 case BuiltinType::LongFract: 7400 case BuiltinType::UShortFract: 7401 case BuiltinType::UFract: 7402 case BuiltinType::ULongFract: 7403 case BuiltinType::SatShortAccum: 7404 case BuiltinType::SatAccum: 7405 case BuiltinType::SatLongAccum: 7406 case BuiltinType::SatUShortAccum: 7407 case BuiltinType::SatUAccum: 7408 case BuiltinType::SatULongAccum: 7409 case BuiltinType::SatShortFract: 7410 case BuiltinType::SatFract: 7411 case BuiltinType::SatLongFract: 7412 case BuiltinType::SatUShortFract: 7413 case BuiltinType::SatUFract: 7414 case BuiltinType::SatULongFract: 7415 // FIXME: potentially need @encodes for these! 7416 return ' '; 7417 7418 #define SVE_TYPE(Name, Id, SingletonId) \ 7419 case BuiltinType::Id: 7420 #include "clang/Basic/AArch64SVEACLETypes.def" 7421 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 7422 #include "clang/Basic/RISCVVTypes.def" 7423 { 7424 DiagnosticsEngine &Diags = C->getDiagnostics(); 7425 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error, 7426 "cannot yet @encode type %0"); 7427 Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy()); 7428 return ' '; 7429 } 7430 7431 case BuiltinType::ObjCId: 7432 case BuiltinType::ObjCClass: 7433 case BuiltinType::ObjCSel: 7434 llvm_unreachable("@encoding ObjC primitive type"); 7435 7436 // OpenCL and placeholder types don't need @encodings. 
7437 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
7438   case BuiltinType::Id:
7439 #include "clang/Basic/OpenCLImageTypes.def"
7440 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
7441   case BuiltinType::Id:
7442 #include "clang/Basic/OpenCLExtensionTypes.def"
7443   case BuiltinType::OCLEvent:
7444   case BuiltinType::OCLClkEvent:
7445   case BuiltinType::OCLQueue:
7446   case BuiltinType::OCLReserveID:
7447   case BuiltinType::OCLSampler:
7448   case BuiltinType::Dependent:
7449 #define PPC_VECTOR_TYPE(Name, Id, Size) \
7450   case BuiltinType::Id:
7451 #include "clang/Basic/PPCTypes.def"
7452 #define BUILTIN_TYPE(KIND, ID)
7453 #define PLACEHOLDER_TYPE(KIND, ID) \
7454   case BuiltinType::KIND:
7455 #include "clang/AST/BuiltinTypes.def"
7456     llvm_unreachable("invalid builtin type for @encode");
7457   }
7458   llvm_unreachable("invalid BuiltinType::Kind value");
7459 }
7460 
7461 static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
7462   EnumDecl *Enum = ET->getDecl();
7463 
7464   // The encoding of a non-fixed enum type is always 'i', regardless of size.
7465   if (!Enum->isFixed())
7466     return 'i';
7467 
7468   // The encoding of a fixed enum type matches its fixed underlying type.
7469   const auto *BT = Enum->getIntegerType()->castAs<BuiltinType>();
7470   return getObjCEncodingForPrimitiveType(C, BT);
7471 }
7472 
7473 static void EncodeBitField(const ASTContext *Ctx, std::string& S,
7474                            QualType T, const FieldDecl *FD) {
7475   assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
7476   S += 'b';
7477   // The NeXT runtime encodes bit fields as b followed by the number of bits.
7478   // The GNU runtime requires more information; bitfields are encoded as b,
7479   // then the offset (in bits) of the first element, then the type of the
7480   // bitfield, then the size in bits. For example, in this structure:
7481   //
7482   // struct
7483   // {
7484   //   int integer;
7485   //   int flags:2;
7486   // };
7487   // On a 32-bit system, the encoding for flags would be b2 for the NeXT
7488   // runtime, but b32i2 for the GNU runtime. The reason for this extra
7489   // information is not especially sensible, but we're stuck with it for
7490   // compatibility with GCC, although providing it breaks anything that
7491   // actually uses runtime introspection and wants to work on both runtimes...
7492   if (Ctx->getLangOpts().ObjCRuntime.isGNUFamily()) {
7493     uint64_t Offset;
7494 
7495     if (const auto *IVD = dyn_cast<ObjCIvarDecl>(FD)) {
7496       Offset = Ctx->lookupFieldBitOffset(IVD->getContainingInterface(), nullptr,
7497                                          IVD);
7498     } else {
7499       const RecordDecl *RD = FD->getParent();
7500       const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
7501       Offset = RL.getFieldOffset(FD->getFieldIndex());
7502     }
7503 
7504     S += llvm::utostr(Offset);
7505 
7506     if (const auto *ET = T->getAs<EnumType>())
7507       S += ObjCEncodingForEnumType(Ctx, ET);
7508     else {
7509       const auto *BT = T->castAs<BuiltinType>();
7510       S += getObjCEncodingForPrimitiveType(Ctx, BT);
7511     }
7512   }
7513   S += llvm::utostr(FD->getBitWidthValue(*Ctx));
7514 }
7515 
7516 // Helper function for determining whether the encoded type string would include
7517 // a template specialization type.
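// For example, a type such as 'std::vector<int> *' would otherwise pull the
// instantiated template name into the @encode string; callers use this check
// to fall back to "^v" for such pointers (see the Pointer case below).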
7518 static bool hasTemplateSpecializationInEncodedString(const Type *T, 7519 bool VisitBasesAndFields) { 7520 T = T->getBaseElementTypeUnsafe(); 7521 7522 if (auto *PT = T->getAs<PointerType>()) 7523 return hasTemplateSpecializationInEncodedString( 7524 PT->getPointeeType().getTypePtr(), false); 7525 7526 auto *CXXRD = T->getAsCXXRecordDecl(); 7527 7528 if (!CXXRD) 7529 return false; 7530 7531 if (isa<ClassTemplateSpecializationDecl>(CXXRD)) 7532 return true; 7533 7534 if (!CXXRD->hasDefinition() || !VisitBasesAndFields) 7535 return false; 7536 7537 for (auto B : CXXRD->bases()) 7538 if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(), 7539 true)) 7540 return true; 7541 7542 for (auto *FD : CXXRD->fields()) 7543 if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(), 7544 true)) 7545 return true; 7546 7547 return false; 7548 } 7549 7550 // FIXME: Use SmallString for accumulating string. 7551 void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S, 7552 const ObjCEncOptions Options, 7553 const FieldDecl *FD, 7554 QualType *NotEncodedT) const { 7555 CanQualType CT = getCanonicalType(T); 7556 switch (CT->getTypeClass()) { 7557 case Type::Builtin: 7558 case Type::Enum: 7559 if (FD && FD->isBitField()) 7560 return EncodeBitField(this, S, T, FD); 7561 if (const auto *BT = dyn_cast<BuiltinType>(CT)) 7562 S += getObjCEncodingForPrimitiveType(this, BT); 7563 else 7564 S += ObjCEncodingForEnumType(this, cast<EnumType>(CT)); 7565 return; 7566 7567 case Type::Complex: 7568 S += 'j'; 7569 getObjCEncodingForTypeImpl(T->castAs<ComplexType>()->getElementType(), S, 7570 ObjCEncOptions(), 7571 /*Field=*/nullptr); 7572 return; 7573 7574 case Type::Atomic: 7575 S += 'A'; 7576 getObjCEncodingForTypeImpl(T->castAs<AtomicType>()->getValueType(), S, 7577 ObjCEncOptions(), 7578 /*Field=*/nullptr); 7579 return; 7580 7581 // encoding for pointer or reference types. 7582 case Type::Pointer: 7583 case Type::LValueReference: 7584 case Type::RValueReference: { 7585 QualType PointeeTy; 7586 if (isa<PointerType>(CT)) { 7587 const auto *PT = T->castAs<PointerType>(); 7588 if (PT->isObjCSelType()) { 7589 S += ':'; 7590 return; 7591 } 7592 PointeeTy = PT->getPointeeType(); 7593 } else { 7594 PointeeTy = T->castAs<ReferenceType>()->getPointeeType(); 7595 } 7596 7597 bool isReadOnly = false; 7598 // For historical/compatibility reasons, the read-only qualifier of the 7599 // pointee gets emitted _before_ the '^'. The read-only qualifier of 7600 // the pointer itself gets ignored, _unless_ we are looking at a typedef! 7601 // Also, do not emit the 'r' for anything but the outermost type! 7602 if (isa<TypedefType>(T.getTypePtr())) { 7603 if (Options.IsOutermostType() && T.isConstQualified()) { 7604 isReadOnly = true; 7605 S += 'r'; 7606 } 7607 } else if (Options.IsOutermostType()) { 7608 QualType P = PointeeTy; 7609 while (auto PT = P->getAs<PointerType>()) 7610 P = PT->getPointeeType(); 7611 if (P.isConstQualified()) { 7612 isReadOnly = true; 7613 S += 'r'; 7614 } 7615 } 7616 if (isReadOnly) { 7617 // Another legacy compatibility encoding. Some ObjC qualifier and type 7618 // combinations need to be rearranged. 7619 // Rewrite "in const" from "nr" to "rn" 7620 if (StringRef(S).endswith("nr")) 7621 S.replace(S.end()-2, S.end(), "rn"); 7622 } 7623 7624 if (PointeeTy->isCharType()) { 7625 // char pointer types should be encoded as '*' unless it is a 7626 // type that has been typedef'd to 'BOOL'. 
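    // (On targets where BOOL is a typedef for 'signed char', a 'BOOL *'
    // therefore stays "^c" rather than collapsing to '*'.)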
7627 if (!isTypeTypedefedAsBOOL(PointeeTy)) { 7628 S += '*'; 7629 return; 7630 } 7631 } else if (const auto *RTy = PointeeTy->getAs<RecordType>()) { 7632 // GCC binary compat: Need to convert "struct objc_class *" to "#". 7633 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) { 7634 S += '#'; 7635 return; 7636 } 7637 // GCC binary compat: Need to convert "struct objc_object *" to "@". 7638 if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) { 7639 S += '@'; 7640 return; 7641 } 7642 // If the encoded string for the class includes template names, just emit 7643 // "^v" for pointers to the class. 7644 if (getLangOpts().CPlusPlus && 7645 (!getLangOpts().EncodeCXXClassTemplateSpec && 7646 hasTemplateSpecializationInEncodedString( 7647 RTy, Options.ExpandPointedToStructures()))) { 7648 S += "^v"; 7649 return; 7650 } 7651 // fall through... 7652 } 7653 S += '^'; 7654 getLegacyIntegralTypeEncoding(PointeeTy); 7655 7656 ObjCEncOptions NewOptions; 7657 if (Options.ExpandPointedToStructures()) 7658 NewOptions.setExpandStructures(); 7659 getObjCEncodingForTypeImpl(PointeeTy, S, NewOptions, 7660 /*Field=*/nullptr, NotEncodedT); 7661 return; 7662 } 7663 7664 case Type::ConstantArray: 7665 case Type::IncompleteArray: 7666 case Type::VariableArray: { 7667 const auto *AT = cast<ArrayType>(CT); 7668 7669 if (isa<IncompleteArrayType>(AT) && !Options.IsStructField()) { 7670 // Incomplete arrays are encoded as a pointer to the array element. 7671 S += '^'; 7672 7673 getObjCEncodingForTypeImpl( 7674 AT->getElementType(), S, 7675 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD); 7676 } else { 7677 S += '['; 7678 7679 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) 7680 S += llvm::utostr(CAT->getSize().getZExtValue()); 7681 else { 7682 //Variable length arrays are encoded as a regular array with 0 elements. 7683 assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) && 7684 "Unknown array type!"); 7685 S += '0'; 7686 } 7687 7688 getObjCEncodingForTypeImpl( 7689 AT->getElementType(), S, 7690 Options.keepingOnly(ObjCEncOptions().setExpandStructures()), FD, 7691 NotEncodedT); 7692 S += ']'; 7693 } 7694 return; 7695 } 7696 7697 case Type::FunctionNoProto: 7698 case Type::FunctionProto: 7699 S += '?'; 7700 return; 7701 7702 case Type::Record: { 7703 RecordDecl *RDecl = cast<RecordType>(CT)->getDecl(); 7704 S += RDecl->isUnion() ? '(' : '{'; 7705 // Anonymous structures print as '?' 7706 if (const IdentifierInfo *II = RDecl->getIdentifier()) { 7707 S += II->getName(); 7708 if (const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) { 7709 const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); 7710 llvm::raw_string_ostream OS(S); 7711 printTemplateArgumentList(OS, TemplateArgs.asArray(), 7712 getPrintingPolicy()); 7713 } 7714 } else { 7715 S += '?'; 7716 } 7717 if (Options.ExpandStructures()) { 7718 S += '='; 7719 if (!RDecl->isUnion()) { 7720 getObjCEncodingForStructureImpl(RDecl, S, FD, true, NotEncodedT); 7721 } else { 7722 for (const auto *Field : RDecl->fields()) { 7723 if (FD) { 7724 S += '"'; 7725 S += Field->getNameAsString(); 7726 S += '"'; 7727 } 7728 7729 // Special case bit-fields. 
7730 if (Field->isBitField()) { 7731 getObjCEncodingForTypeImpl(Field->getType(), S, 7732 ObjCEncOptions().setExpandStructures(), 7733 Field); 7734 } else { 7735 QualType qt = Field->getType(); 7736 getLegacyIntegralTypeEncoding(qt); 7737 getObjCEncodingForTypeImpl( 7738 qt, S, 7739 ObjCEncOptions().setExpandStructures().setIsStructField(), FD, 7740 NotEncodedT); 7741 } 7742 } 7743 } 7744 } 7745 S += RDecl->isUnion() ? ')' : '}'; 7746 return; 7747 } 7748 7749 case Type::BlockPointer: { 7750 const auto *BT = T->castAs<BlockPointerType>(); 7751 S += "@?"; // Unlike a pointer-to-function, which is "^?". 7752 if (Options.EncodeBlockParameters()) { 7753 const auto *FT = BT->getPointeeType()->castAs<FunctionType>(); 7754 7755 S += '<'; 7756 // Block return type 7757 getObjCEncodingForTypeImpl(FT->getReturnType(), S, 7758 Options.forComponentType(), FD, NotEncodedT); 7759 // Block self 7760 S += "@?"; 7761 // Block parameters 7762 if (const auto *FPT = dyn_cast<FunctionProtoType>(FT)) { 7763 for (const auto &I : FPT->param_types()) 7764 getObjCEncodingForTypeImpl(I, S, Options.forComponentType(), FD, 7765 NotEncodedT); 7766 } 7767 S += '>'; 7768 } 7769 return; 7770 } 7771 7772 case Type::ObjCObject: { 7773 // hack to match legacy encoding of *id and *Class 7774 QualType Ty = getObjCObjectPointerType(CT); 7775 if (Ty->isObjCIdType()) { 7776 S += "{objc_object=}"; 7777 return; 7778 } 7779 else if (Ty->isObjCClassType()) { 7780 S += "{objc_class=}"; 7781 return; 7782 } 7783 // TODO: Double check to make sure this intentionally falls through. 7784 LLVM_FALLTHROUGH; 7785 } 7786 7787 case Type::ObjCInterface: { 7788 // Ignore protocol qualifiers when mangling at this level. 7789 // @encode(class_name) 7790 ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface(); 7791 S += '{'; 7792 S += OI->getObjCRuntimeNameAsString(); 7793 if (Options.ExpandStructures()) { 7794 S += '='; 7795 SmallVector<const ObjCIvarDecl*, 32> Ivars; 7796 DeepCollectObjCIvars(OI, true, Ivars); 7797 for (unsigned i = 0, e = Ivars.size(); i != e; ++i) { 7798 const FieldDecl *Field = Ivars[i]; 7799 if (Field->isBitField()) 7800 getObjCEncodingForTypeImpl(Field->getType(), S, 7801 ObjCEncOptions().setExpandStructures(), 7802 Field); 7803 else 7804 getObjCEncodingForTypeImpl(Field->getType(), S, 7805 ObjCEncOptions().setExpandStructures(), FD, 7806 NotEncodedT); 7807 } 7808 } 7809 S += '}'; 7810 return; 7811 } 7812 7813 case Type::ObjCObjectPointer: { 7814 const auto *OPT = T->castAs<ObjCObjectPointerType>(); 7815 if (OPT->isObjCIdType()) { 7816 S += '@'; 7817 return; 7818 } 7819 7820 if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) { 7821 // FIXME: Consider if we need to output qualifiers for 'Class<p>'. 7822 // Since this is a binary compatibility issue, need to consult with 7823 // runtime folks. Fortunately, this is a *very* obscure construct. 7824 S += '#'; 7825 return; 7826 } 7827 7828 if (OPT->isObjCQualifiedIdType()) { 7829 getObjCEncodingForTypeImpl( 7830 getObjCIdType(), S, 7831 Options.keepingOnly(ObjCEncOptions() 7832 .setExpandPointedToStructures() 7833 .setExpandStructures()), 7834 FD); 7835 if (FD || Options.EncodingProperty() || Options.EncodeClassNames()) { 7836 // Note that we do extended encoding of protocol qualifier list 7837 // Only when doing ivar or property encoding. 
7838 S += '"'; 7839 for (const auto *I : OPT->quals()) { 7840 S += '<'; 7841 S += I->getObjCRuntimeNameAsString(); 7842 S += '>'; 7843 } 7844 S += '"'; 7845 } 7846 return; 7847 } 7848 7849 S += '@'; 7850 if (OPT->getInterfaceDecl() && 7851 (FD || Options.EncodingProperty() || Options.EncodeClassNames())) { 7852 S += '"'; 7853 S += OPT->getInterfaceDecl()->getObjCRuntimeNameAsString(); 7854 for (const auto *I : OPT->quals()) { 7855 S += '<'; 7856 S += I->getObjCRuntimeNameAsString(); 7857 S += '>'; 7858 } 7859 S += '"'; 7860 } 7861 return; 7862 } 7863 7864 // gcc just blithely ignores member pointers. 7865 // FIXME: we should do better than that. 'M' is available. 7866 case Type::MemberPointer: 7867 // This matches gcc's encoding, even though technically it is insufficient. 7868 //FIXME. We should do a better job than gcc. 7869 case Type::Vector: 7870 case Type::ExtVector: 7871 // Until we have a coherent encoding of these three types, issue warning. 7872 if (NotEncodedT) 7873 *NotEncodedT = T; 7874 return; 7875 7876 case Type::ConstantMatrix: 7877 if (NotEncodedT) 7878 *NotEncodedT = T; 7879 return; 7880 7881 // We could see an undeduced auto type here during error recovery. 7882 // Just ignore it. 7883 case Type::Auto: 7884 case Type::DeducedTemplateSpecialization: 7885 return; 7886 7887 case Type::Pipe: 7888 case Type::ExtInt: 7889 #define ABSTRACT_TYPE(KIND, BASE) 7890 #define TYPE(KIND, BASE) 7891 #define DEPENDENT_TYPE(KIND, BASE) \ 7892 case Type::KIND: 7893 #define NON_CANONICAL_TYPE(KIND, BASE) \ 7894 case Type::KIND: 7895 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(KIND, BASE) \ 7896 case Type::KIND: 7897 #include "clang/AST/TypeNodes.inc" 7898 llvm_unreachable("@encode for dependent type!"); 7899 } 7900 llvm_unreachable("bad type kind!"); 7901 } 7902 7903 void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl, 7904 std::string &S, 7905 const FieldDecl *FD, 7906 bool includeVBases, 7907 QualType *NotEncodedT) const { 7908 assert(RDecl && "Expected non-null RecordDecl"); 7909 assert(!RDecl->isUnion() && "Should not be called for unions"); 7910 if (!RDecl->getDefinition() || RDecl->getDefinition()->isInvalidDecl()) 7911 return; 7912 7913 const auto *CXXRec = dyn_cast<CXXRecordDecl>(RDecl); 7914 std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets; 7915 const ASTRecordLayout &layout = getASTRecordLayout(RDecl); 7916 7917 if (CXXRec) { 7918 for (const auto &BI : CXXRec->bases()) { 7919 if (!BI.isVirtual()) { 7920 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 7921 if (base->isEmpty()) 7922 continue; 7923 uint64_t offs = toBits(layout.getBaseClassOffset(base)); 7924 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 7925 std::make_pair(offs, base)); 7926 } 7927 } 7928 } 7929 7930 unsigned i = 0; 7931 for (FieldDecl *Field : RDecl->fields()) { 7932 if (!Field->isZeroLengthBitField(*this) && Field->isZeroSize(*this)) 7933 continue; 7934 uint64_t offs = layout.getFieldOffset(i); 7935 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs), 7936 std::make_pair(offs, Field)); 7937 ++i; 7938 } 7939 7940 if (CXXRec && includeVBases) { 7941 for (const auto &BI : CXXRec->vbases()) { 7942 CXXRecordDecl *base = BI.getType()->getAsCXXRecordDecl(); 7943 if (base->isEmpty()) 7944 continue; 7945 uint64_t offs = toBits(layout.getVBaseClassOffset(base)); 7946 if (offs >= uint64_t(toBits(layout.getNonVirtualSize())) && 7947 FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end()) 7948 FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(), 7949 
                                  std::make_pair(offs, base));
    }
  }

  CharUnits size;
  if (CXXRec) {
    size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
  } else {
    size = layout.getSize();
  }

#ifndef NDEBUG
  uint64_t CurOffs = 0;
#endif
  std::multimap<uint64_t, NamedDecl *>::iterator
    CurLayObj = FieldOrBaseOffsets.begin();

  if (CXXRec && CXXRec->isDynamicClass() &&
      (CurLayObj == FieldOrBaseOffsets.end() || CurLayObj->first != 0)) {
    if (FD) {
      S += "\"_vptr$";
      std::string recname = CXXRec->getNameAsString();
      if (recname.empty()) recname = "?";
      S += recname;
      S += '"';
    }
    S += "^^?";
#ifndef NDEBUG
    CurOffs += getTypeSize(VoidPtrTy);
#endif
  }

  if (!RDecl->hasFlexibleArrayMember()) {
    // Mark the end of the structure.
    uint64_t offs = toBits(size);
    FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
                              std::make_pair(offs, nullptr));
  }

  for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
#ifndef NDEBUG
    assert(CurOffs <= CurLayObj->first);
    if (CurOffs < CurLayObj->first) {
      uint64_t padding = CurLayObj->first - CurOffs;
      // FIXME: There doesn't seem to be a way to indicate in the encoding that
      // packing/alignment of members is different than normal, in which case
      // the encoding will be out-of-sync with the real layout.
      // If the runtime switches to just consider the size of types without
      // taking into account alignment, we could make padding explicit in the
      // encoding (e.g. using arrays of chars). The encoding strings would be
      // longer then, though.
      CurOffs += padding;
    }
#endif

    NamedDecl *dcl = CurLayObj->second;
    if (!dcl)
      break; // reached end of structure.

    if (auto *base = dyn_cast<CXXRecordDecl>(dcl)) {
      // We expand the bases without their virtual bases since those are going
      // in the initial structure. Note that this differs from gcc which
      // expands virtual bases each time one is encountered in the hierarchy,
      // making the encoding type bigger than it really is.
8013 getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false, 8014 NotEncodedT); 8015 assert(!base->isEmpty()); 8016 #ifndef NDEBUG 8017 CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize()); 8018 #endif 8019 } else { 8020 const auto *field = cast<FieldDecl>(dcl); 8021 if (FD) { 8022 S += '"'; 8023 S += field->getNameAsString(); 8024 S += '"'; 8025 } 8026 8027 if (field->isBitField()) { 8028 EncodeBitField(this, S, field->getType(), field); 8029 #ifndef NDEBUG 8030 CurOffs += field->getBitWidthValue(*this); 8031 #endif 8032 } else { 8033 QualType qt = field->getType(); 8034 getLegacyIntegralTypeEncoding(qt); 8035 getObjCEncodingForTypeImpl( 8036 qt, S, ObjCEncOptions().setExpandStructures().setIsStructField(), 8037 FD, NotEncodedT); 8038 #ifndef NDEBUG 8039 CurOffs += getTypeSize(field->getType()); 8040 #endif 8041 } 8042 } 8043 } 8044 } 8045 8046 void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT, 8047 std::string& S) const { 8048 if (QT & Decl::OBJC_TQ_In) 8049 S += 'n'; 8050 if (QT & Decl::OBJC_TQ_Inout) 8051 S += 'N'; 8052 if (QT & Decl::OBJC_TQ_Out) 8053 S += 'o'; 8054 if (QT & Decl::OBJC_TQ_Bycopy) 8055 S += 'O'; 8056 if (QT & Decl::OBJC_TQ_Byref) 8057 S += 'R'; 8058 if (QT & Decl::OBJC_TQ_Oneway) 8059 S += 'V'; 8060 } 8061 8062 TypedefDecl *ASTContext::getObjCIdDecl() const { 8063 if (!ObjCIdDecl) { 8064 QualType T = getObjCObjectType(ObjCBuiltinIdTy, {}, {}); 8065 T = getObjCObjectPointerType(T); 8066 ObjCIdDecl = buildImplicitTypedef(T, "id"); 8067 } 8068 return ObjCIdDecl; 8069 } 8070 8071 TypedefDecl *ASTContext::getObjCSelDecl() const { 8072 if (!ObjCSelDecl) { 8073 QualType T = getPointerType(ObjCBuiltinSelTy); 8074 ObjCSelDecl = buildImplicitTypedef(T, "SEL"); 8075 } 8076 return ObjCSelDecl; 8077 } 8078 8079 TypedefDecl *ASTContext::getObjCClassDecl() const { 8080 if (!ObjCClassDecl) { 8081 QualType T = getObjCObjectType(ObjCBuiltinClassTy, {}, {}); 8082 T = getObjCObjectPointerType(T); 8083 ObjCClassDecl = buildImplicitTypedef(T, "Class"); 8084 } 8085 return ObjCClassDecl; 8086 } 8087 8088 ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const { 8089 if (!ObjCProtocolClassDecl) { 8090 ObjCProtocolClassDecl 8091 = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(), 8092 SourceLocation(), 8093 &Idents.get("Protocol"), 8094 /*typeParamList=*/nullptr, 8095 /*PrevDecl=*/nullptr, 8096 SourceLocation(), true); 8097 } 8098 8099 return ObjCProtocolClassDecl; 8100 } 8101 8102 //===----------------------------------------------------------------------===// 8103 // __builtin_va_list Construction Functions 8104 //===----------------------------------------------------------------------===// 8105 8106 static TypedefDecl *CreateCharPtrNamedVaListDecl(const ASTContext *Context, 8107 StringRef Name) { 8108 // typedef char* __builtin[_ms]_va_list; 8109 QualType T = Context->getPointerType(Context->CharTy); 8110 return Context->buildImplicitTypedef(T, Name); 8111 } 8112 8113 static TypedefDecl *CreateMSVaListDecl(const ASTContext *Context) { 8114 return CreateCharPtrNamedVaListDecl(Context, "__builtin_ms_va_list"); 8115 } 8116 8117 static TypedefDecl *CreateCharPtrBuiltinVaListDecl(const ASTContext *Context) { 8118 return CreateCharPtrNamedVaListDecl(Context, "__builtin_va_list"); 8119 } 8120 8121 static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) { 8122 // typedef void* __builtin_va_list; 8123 QualType T = Context->getPointerType(Context->VoidTy); 8124 return Context->buildImplicitTypedef(T, 
"__builtin_va_list"); 8125 } 8126 8127 static TypedefDecl * 8128 CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) { 8129 // struct __va_list 8130 RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list"); 8131 if (Context->getLangOpts().CPlusPlus) { 8132 // namespace std { struct __va_list { 8133 NamespaceDecl *NS; 8134 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8135 Context->getTranslationUnitDecl(), 8136 /*Inline*/ false, SourceLocation(), 8137 SourceLocation(), &Context->Idents.get("std"), 8138 /*PrevDecl*/ nullptr); 8139 NS->setImplicit(); 8140 VaListTagDecl->setDeclContext(NS); 8141 } 8142 8143 VaListTagDecl->startDefinition(); 8144 8145 const size_t NumFields = 5; 8146 QualType FieldTypes[NumFields]; 8147 const char *FieldNames[NumFields]; 8148 8149 // void *__stack; 8150 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8151 FieldNames[0] = "__stack"; 8152 8153 // void *__gr_top; 8154 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8155 FieldNames[1] = "__gr_top"; 8156 8157 // void *__vr_top; 8158 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8159 FieldNames[2] = "__vr_top"; 8160 8161 // int __gr_offs; 8162 FieldTypes[3] = Context->IntTy; 8163 FieldNames[3] = "__gr_offs"; 8164 8165 // int __vr_offs; 8166 FieldTypes[4] = Context->IntTy; 8167 FieldNames[4] = "__vr_offs"; 8168 8169 // Create fields 8170 for (unsigned i = 0; i < NumFields; ++i) { 8171 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8172 VaListTagDecl, 8173 SourceLocation(), 8174 SourceLocation(), 8175 &Context->Idents.get(FieldNames[i]), 8176 FieldTypes[i], /*TInfo=*/nullptr, 8177 /*BitWidth=*/nullptr, 8178 /*Mutable=*/false, 8179 ICIS_NoInit); 8180 Field->setAccess(AS_public); 8181 VaListTagDecl->addDecl(Field); 8182 } 8183 VaListTagDecl->completeDefinition(); 8184 Context->VaListTagDecl = VaListTagDecl; 8185 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8186 8187 // } __builtin_va_list; 8188 return Context->buildImplicitTypedef(VaListTagType, "__builtin_va_list"); 8189 } 8190 8191 static TypedefDecl *CreatePowerABIBuiltinVaListDecl(const ASTContext *Context) { 8192 // typedef struct __va_list_tag { 8193 RecordDecl *VaListTagDecl; 8194 8195 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8196 VaListTagDecl->startDefinition(); 8197 8198 const size_t NumFields = 5; 8199 QualType FieldTypes[NumFields]; 8200 const char *FieldNames[NumFields]; 8201 8202 // unsigned char gpr; 8203 FieldTypes[0] = Context->UnsignedCharTy; 8204 FieldNames[0] = "gpr"; 8205 8206 // unsigned char fpr; 8207 FieldTypes[1] = Context->UnsignedCharTy; 8208 FieldNames[1] = "fpr"; 8209 8210 // unsigned short reserved; 8211 FieldTypes[2] = Context->UnsignedShortTy; 8212 FieldNames[2] = "reserved"; 8213 8214 // void* overflow_arg_area; 8215 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8216 FieldNames[3] = "overflow_arg_area"; 8217 8218 // void* reg_save_area; 8219 FieldTypes[4] = Context->getPointerType(Context->VoidTy); 8220 FieldNames[4] = "reg_save_area"; 8221 8222 // Create fields 8223 for (unsigned i = 0; i < NumFields; ++i) { 8224 FieldDecl *Field = FieldDecl::Create(*Context, VaListTagDecl, 8225 SourceLocation(), 8226 SourceLocation(), 8227 &Context->Idents.get(FieldNames[i]), 8228 FieldTypes[i], /*TInfo=*/nullptr, 8229 /*BitWidth=*/nullptr, 8230 /*Mutable=*/false, 8231 ICIS_NoInit); 8232 Field->setAccess(AS_public); 8233 VaListTagDecl->addDecl(Field); 8234 } 8235 VaListTagDecl->completeDefinition(); 8236 
Context->VaListTagDecl = VaListTagDecl; 8237 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8238 8239 // } __va_list_tag; 8240 TypedefDecl *VaListTagTypedefDecl = 8241 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8242 8243 QualType VaListTagTypedefType = 8244 Context->getTypedefType(VaListTagTypedefDecl); 8245 8246 // typedef __va_list_tag __builtin_va_list[1]; 8247 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8248 QualType VaListTagArrayType 8249 = Context->getConstantArrayType(VaListTagTypedefType, 8250 Size, nullptr, ArrayType::Normal, 0); 8251 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8252 } 8253 8254 static TypedefDecl * 8255 CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) { 8256 // struct __va_list_tag { 8257 RecordDecl *VaListTagDecl; 8258 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8259 VaListTagDecl->startDefinition(); 8260 8261 const size_t NumFields = 4; 8262 QualType FieldTypes[NumFields]; 8263 const char *FieldNames[NumFields]; 8264 8265 // unsigned gp_offset; 8266 FieldTypes[0] = Context->UnsignedIntTy; 8267 FieldNames[0] = "gp_offset"; 8268 8269 // unsigned fp_offset; 8270 FieldTypes[1] = Context->UnsignedIntTy; 8271 FieldNames[1] = "fp_offset"; 8272 8273 // void* overflow_arg_area; 8274 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8275 FieldNames[2] = "overflow_arg_area"; 8276 8277 // void* reg_save_area; 8278 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8279 FieldNames[3] = "reg_save_area"; 8280 8281 // Create fields 8282 for (unsigned i = 0; i < NumFields; ++i) { 8283 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8284 VaListTagDecl, 8285 SourceLocation(), 8286 SourceLocation(), 8287 &Context->Idents.get(FieldNames[i]), 8288 FieldTypes[i], /*TInfo=*/nullptr, 8289 /*BitWidth=*/nullptr, 8290 /*Mutable=*/false, 8291 ICIS_NoInit); 8292 Field->setAccess(AS_public); 8293 VaListTagDecl->addDecl(Field); 8294 } 8295 VaListTagDecl->completeDefinition(); 8296 Context->VaListTagDecl = VaListTagDecl; 8297 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8298 8299 // }; 8300 8301 // typedef struct __va_list_tag __builtin_va_list[1]; 8302 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8303 QualType VaListTagArrayType = Context->getConstantArrayType( 8304 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8305 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8306 } 8307 8308 static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) { 8309 // typedef int __builtin_va_list[4]; 8310 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4); 8311 QualType IntArrayType = Context->getConstantArrayType( 8312 Context->IntTy, Size, nullptr, ArrayType::Normal, 0); 8313 return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list"); 8314 } 8315 8316 static TypedefDecl * 8317 CreateAAPCSABIBuiltinVaListDecl(const ASTContext *Context) { 8318 // struct __va_list 8319 RecordDecl *VaListDecl = Context->buildImplicitRecord("__va_list"); 8320 if (Context->getLangOpts().CPlusPlus) { 8321 // namespace std { struct __va_list { 8322 NamespaceDecl *NS; 8323 NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context), 8324 Context->getTranslationUnitDecl(), 8325 /*Inline*/false, SourceLocation(), 8326 SourceLocation(), &Context->Idents.get("std"), 8327 /*PrevDecl*/ nullptr); 8328 NS->setImplicit(); 8329 
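    // Note: the re-parenting into an implicit 'std' namespace below mirrors
    // CreateAArch64ABIBuiltinVaListDecl above; the intent (as with AArch64) is
    // presumably that the record mangles as std::__va_list per the ARM C++
    // ABI.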
VaListDecl->setDeclContext(NS); 8330 } 8331 8332 VaListDecl->startDefinition(); 8333 8334 // void * __ap; 8335 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8336 VaListDecl, 8337 SourceLocation(), 8338 SourceLocation(), 8339 &Context->Idents.get("__ap"), 8340 Context->getPointerType(Context->VoidTy), 8341 /*TInfo=*/nullptr, 8342 /*BitWidth=*/nullptr, 8343 /*Mutable=*/false, 8344 ICIS_NoInit); 8345 Field->setAccess(AS_public); 8346 VaListDecl->addDecl(Field); 8347 8348 // }; 8349 VaListDecl->completeDefinition(); 8350 Context->VaListTagDecl = VaListDecl; 8351 8352 // typedef struct __va_list __builtin_va_list; 8353 QualType T = Context->getRecordType(VaListDecl); 8354 return Context->buildImplicitTypedef(T, "__builtin_va_list"); 8355 } 8356 8357 static TypedefDecl * 8358 CreateSystemZBuiltinVaListDecl(const ASTContext *Context) { 8359 // struct __va_list_tag { 8360 RecordDecl *VaListTagDecl; 8361 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8362 VaListTagDecl->startDefinition(); 8363 8364 const size_t NumFields = 4; 8365 QualType FieldTypes[NumFields]; 8366 const char *FieldNames[NumFields]; 8367 8368 // long __gpr; 8369 FieldTypes[0] = Context->LongTy; 8370 FieldNames[0] = "__gpr"; 8371 8372 // long __fpr; 8373 FieldTypes[1] = Context->LongTy; 8374 FieldNames[1] = "__fpr"; 8375 8376 // void *__overflow_arg_area; 8377 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8378 FieldNames[2] = "__overflow_arg_area"; 8379 8380 // void *__reg_save_area; 8381 FieldTypes[3] = Context->getPointerType(Context->VoidTy); 8382 FieldNames[3] = "__reg_save_area"; 8383 8384 // Create fields 8385 for (unsigned i = 0; i < NumFields; ++i) { 8386 FieldDecl *Field = FieldDecl::Create(const_cast<ASTContext &>(*Context), 8387 VaListTagDecl, 8388 SourceLocation(), 8389 SourceLocation(), 8390 &Context->Idents.get(FieldNames[i]), 8391 FieldTypes[i], /*TInfo=*/nullptr, 8392 /*BitWidth=*/nullptr, 8393 /*Mutable=*/false, 8394 ICIS_NoInit); 8395 Field->setAccess(AS_public); 8396 VaListTagDecl->addDecl(Field); 8397 } 8398 VaListTagDecl->completeDefinition(); 8399 Context->VaListTagDecl = VaListTagDecl; 8400 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8401 8402 // }; 8403 8404 // typedef __va_list_tag __builtin_va_list[1]; 8405 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8406 QualType VaListTagArrayType = Context->getConstantArrayType( 8407 VaListTagType, Size, nullptr, ArrayType::Normal, 0); 8408 8409 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8410 } 8411 8412 static TypedefDecl *CreateHexagonBuiltinVaListDecl(const ASTContext *Context) { 8413 // typedef struct __va_list_tag { 8414 RecordDecl *VaListTagDecl; 8415 VaListTagDecl = Context->buildImplicitRecord("__va_list_tag"); 8416 VaListTagDecl->startDefinition(); 8417 8418 const size_t NumFields = 3; 8419 QualType FieldTypes[NumFields]; 8420 const char *FieldNames[NumFields]; 8421 8422 // void *CurrentSavedRegisterArea; 8423 FieldTypes[0] = Context->getPointerType(Context->VoidTy); 8424 FieldNames[0] = "__current_saved_reg_area_pointer"; 8425 8426 // void *SavedRegAreaEnd; 8427 FieldTypes[1] = Context->getPointerType(Context->VoidTy); 8428 FieldNames[1] = "__saved_reg_area_end_pointer"; 8429 8430 // void *OverflowArea; 8431 FieldTypes[2] = Context->getPointerType(Context->VoidTy); 8432 FieldNames[2] = "__overflow_area_pointer"; 8433 8434 // Create fields 8435 for (unsigned i = 0; i < NumFields; ++i) { 8436 FieldDecl *Field = FieldDecl::Create( 
8437 const_cast<ASTContext &>(*Context), VaListTagDecl, SourceLocation(), 8438 SourceLocation(), &Context->Idents.get(FieldNames[i]), FieldTypes[i], 8439 /*TInfo=*/0, 8440 /*BitWidth=*/0, 8441 /*Mutable=*/false, ICIS_NoInit); 8442 Field->setAccess(AS_public); 8443 VaListTagDecl->addDecl(Field); 8444 } 8445 VaListTagDecl->completeDefinition(); 8446 Context->VaListTagDecl = VaListTagDecl; 8447 QualType VaListTagType = Context->getRecordType(VaListTagDecl); 8448 8449 // } __va_list_tag; 8450 TypedefDecl *VaListTagTypedefDecl = 8451 Context->buildImplicitTypedef(VaListTagType, "__va_list_tag"); 8452 8453 QualType VaListTagTypedefType = Context->getTypedefType(VaListTagTypedefDecl); 8454 8455 // typedef __va_list_tag __builtin_va_list[1]; 8456 llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 1); 8457 QualType VaListTagArrayType = Context->getConstantArrayType( 8458 VaListTagTypedefType, Size, nullptr, ArrayType::Normal, 0); 8459 8460 return Context->buildImplicitTypedef(VaListTagArrayType, "__builtin_va_list"); 8461 } 8462 8463 static TypedefDecl *CreateVaListDecl(const ASTContext *Context, 8464 TargetInfo::BuiltinVaListKind Kind) { 8465 switch (Kind) { 8466 case TargetInfo::CharPtrBuiltinVaList: 8467 return CreateCharPtrBuiltinVaListDecl(Context); 8468 case TargetInfo::VoidPtrBuiltinVaList: 8469 return CreateVoidPtrBuiltinVaListDecl(Context); 8470 case TargetInfo::AArch64ABIBuiltinVaList: 8471 return CreateAArch64ABIBuiltinVaListDecl(Context); 8472 case TargetInfo::PowerABIBuiltinVaList: 8473 return CreatePowerABIBuiltinVaListDecl(Context); 8474 case TargetInfo::X86_64ABIBuiltinVaList: 8475 return CreateX86_64ABIBuiltinVaListDecl(Context); 8476 case TargetInfo::PNaClABIBuiltinVaList: 8477 return CreatePNaClABIBuiltinVaListDecl(Context); 8478 case TargetInfo::AAPCSABIBuiltinVaList: 8479 return CreateAAPCSABIBuiltinVaListDecl(Context); 8480 case TargetInfo::SystemZBuiltinVaList: 8481 return CreateSystemZBuiltinVaListDecl(Context); 8482 case TargetInfo::HexagonBuiltinVaList: 8483 return CreateHexagonBuiltinVaListDecl(Context); 8484 } 8485 8486 llvm_unreachable("Unhandled __builtin_va_list type kind"); 8487 } 8488 8489 TypedefDecl *ASTContext::getBuiltinVaListDecl() const { 8490 if (!BuiltinVaListDecl) { 8491 BuiltinVaListDecl = CreateVaListDecl(this, Target->getBuiltinVaListKind()); 8492 assert(BuiltinVaListDecl->isImplicit()); 8493 } 8494 8495 return BuiltinVaListDecl; 8496 } 8497 8498 Decl *ASTContext::getVaListTagDecl() const { 8499 // Force the creation of VaListTagDecl by building the __builtin_va_list 8500 // declaration. 8501 if (!VaListTagDecl) 8502 (void)getBuiltinVaListDecl(); 8503 8504 return VaListTagDecl; 8505 } 8506 8507 TypedefDecl *ASTContext::getBuiltinMSVaListDecl() const { 8508 if (!BuiltinMSVaListDecl) 8509 BuiltinMSVaListDecl = CreateMSVaListDecl(this); 8510 8511 return BuiltinMSVaListDecl; 8512 } 8513 8514 bool ASTContext::canBuiltinBeRedeclared(const FunctionDecl *FD) const { 8515 return BuiltinInfo.canBeRedeclared(FD->getBuiltinID()); 8516 } 8517 8518 void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) { 8519 assert(ObjCConstantStringType.isNull() && 8520 "'NSConstantString' type already set!"); 8521 8522 ObjCConstantStringType = getObjCInterfaceType(Decl); 8523 } 8524 8525 /// Retrieve the template name that corresponds to a non-empty 8526 /// lookup. 
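/// The declarations in [Begin, End) are copied into an
/// OverloadedTemplateStorage allocated on this ASTContext; the set is
/// expected to contain more than one declaration.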
8527 TemplateName 8528 ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin, 8529 UnresolvedSetIterator End) const { 8530 unsigned size = End - Begin; 8531 assert(size > 1 && "set is not overloaded!"); 8532 8533 void *memory = Allocate(sizeof(OverloadedTemplateStorage) + 8534 size * sizeof(FunctionTemplateDecl*)); 8535 auto *OT = new (memory) OverloadedTemplateStorage(size); 8536 8537 NamedDecl **Storage = OT->getStorage(); 8538 for (UnresolvedSetIterator I = Begin; I != End; ++I) { 8539 NamedDecl *D = *I; 8540 assert(isa<FunctionTemplateDecl>(D) || 8541 isa<UnresolvedUsingValueDecl>(D) || 8542 (isa<UsingShadowDecl>(D) && 8543 isa<FunctionTemplateDecl>(D->getUnderlyingDecl()))); 8544 *Storage++ = D; 8545 } 8546 8547 return TemplateName(OT); 8548 } 8549 8550 /// Retrieve a template name representing an unqualified-id that has been 8551 /// assumed to name a template for ADL purposes. 8552 TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const { 8553 auto *OT = new (*this) AssumedTemplateStorage(Name); 8554 return TemplateName(OT); 8555 } 8556 8557 /// Retrieve the template name that represents a qualified 8558 /// template name such as \c std::vector. 8559 TemplateName 8560 ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, 8561 bool TemplateKeyword, 8562 TemplateDecl *Template) const { 8563 assert(NNS && "Missing nested-name-specifier in qualified template name"); 8564 8565 // FIXME: Canonicalization? 8566 llvm::FoldingSetNodeID ID; 8567 QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template); 8568 8569 void *InsertPos = nullptr; 8570 QualifiedTemplateName *QTN = 8571 QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8572 if (!QTN) { 8573 QTN = new (*this, alignof(QualifiedTemplateName)) 8574 QualifiedTemplateName(NNS, TemplateKeyword, Template); 8575 QualifiedTemplateNames.InsertNode(QTN, InsertPos); 8576 } 8577 8578 return TemplateName(QTN); 8579 } 8580 8581 /// Retrieve the template name that represents a dependent 8582 /// template name such as \c MetaFun::template apply. 8583 TemplateName 8584 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 8585 const IdentifierInfo *Name) const { 8586 assert((!NNS || NNS->isDependent()) && 8587 "Nested name specifier must be dependent"); 8588 8589 llvm::FoldingSetNodeID ID; 8590 DependentTemplateName::Profile(ID, NNS, Name); 8591 8592 void *InsertPos = nullptr; 8593 DependentTemplateName *QTN = 8594 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8595 8596 if (QTN) 8597 return TemplateName(QTN); 8598 8599 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 8600 if (CanonNNS == NNS) { 8601 QTN = new (*this, alignof(DependentTemplateName)) 8602 DependentTemplateName(NNS, Name); 8603 } else { 8604 TemplateName Canon = getDependentTemplateName(CanonNNS, Name); 8605 QTN = new (*this, alignof(DependentTemplateName)) 8606 DependentTemplateName(NNS, Name, Canon); 8607 DependentTemplateName *CheckQTN = 8608 DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8609 assert(!CheckQTN && "Dependent type name canonicalization broken"); 8610 (void)CheckQTN; 8611 } 8612 8613 DependentTemplateNames.InsertNode(QTN, InsertPos); 8614 return TemplateName(QTN); 8615 } 8616 8617 /// Retrieve the template name that represents a dependent 8618 /// template name such as \c MetaFun::template operator+. 
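/// This overload is used when the dependent template name names an overloaded
/// operator rather than an identifier.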
8619 TemplateName 8620 ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, 8621 OverloadedOperatorKind Operator) const { 8622 assert((!NNS || NNS->isDependent()) && 8623 "Nested name specifier must be dependent"); 8624 8625 llvm::FoldingSetNodeID ID; 8626 DependentTemplateName::Profile(ID, NNS, Operator); 8627 8628 void *InsertPos = nullptr; 8629 DependentTemplateName *QTN 8630 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8631 8632 if (QTN) 8633 return TemplateName(QTN); 8634 8635 NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); 8636 if (CanonNNS == NNS) { 8637 QTN = new (*this, alignof(DependentTemplateName)) 8638 DependentTemplateName(NNS, Operator); 8639 } else { 8640 TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); 8641 QTN = new (*this, alignof(DependentTemplateName)) 8642 DependentTemplateName(NNS, Operator, Canon); 8643 8644 DependentTemplateName *CheckQTN 8645 = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); 8646 assert(!CheckQTN && "Dependent template name canonicalization broken"); 8647 (void)CheckQTN; 8648 } 8649 8650 DependentTemplateNames.InsertNode(QTN, InsertPos); 8651 return TemplateName(QTN); 8652 } 8653 8654 TemplateName 8655 ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param, 8656 TemplateName replacement) const { 8657 llvm::FoldingSetNodeID ID; 8658 SubstTemplateTemplateParmStorage::Profile(ID, param, replacement); 8659 8660 void *insertPos = nullptr; 8661 SubstTemplateTemplateParmStorage *subst 8662 = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos); 8663 8664 if (!subst) { 8665 subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement); 8666 SubstTemplateTemplateParms.InsertNode(subst, insertPos); 8667 } 8668 8669 return TemplateName(subst); 8670 } 8671 8672 TemplateName 8673 ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param, 8674 const TemplateArgument &ArgPack) const { 8675 auto &Self = const_cast<ASTContext &>(*this); 8676 llvm::FoldingSetNodeID ID; 8677 SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack); 8678 8679 void *InsertPos = nullptr; 8680 SubstTemplateTemplateParmPackStorage *Subst 8681 = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos); 8682 8683 if (!Subst) { 8684 Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param, 8685 ArgPack.pack_size(), 8686 ArgPack.pack_begin()); 8687 SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos); 8688 } 8689 8690 return TemplateName(Subst); 8691 } 8692 8693 /// getFromTargetType - Given one of the integer types provided by 8694 /// TargetInfo, produce the corresponding type. The unsigned @p Type 8695 /// is actually a value of type @c TargetInfo::IntType. 
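/// Returns a null CanQualType if @p Type is TargetInfo::NoInt.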
CanQualType ASTContext::getFromTargetType(unsigned Type) const {
  switch (Type) {
  case TargetInfo::NoInt: return {};
  case TargetInfo::SignedChar: return SignedCharTy;
  case TargetInfo::UnsignedChar: return UnsignedCharTy;
  case TargetInfo::SignedShort: return ShortTy;
  case TargetInfo::UnsignedShort: return UnsignedShortTy;
  case TargetInfo::SignedInt: return IntTy;
  case TargetInfo::UnsignedInt: return UnsignedIntTy;
  case TargetInfo::SignedLong: return LongTy;
  case TargetInfo::UnsignedLong: return UnsignedLongTy;
  case TargetInfo::SignedLongLong: return LongLongTy;
  case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
  }

  llvm_unreachable("Unhandled TargetInfo::IntType value");
}

//===----------------------------------------------------------------------===//
// Type Predicates.
//===----------------------------------------------------------------------===//

/// getObjCGCAttr - Returns one of GCNone, Weak or Strong objc's
/// garbage collection attribute.
///
Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
  if (getLangOpts().getGC() == LangOptions::NonGC)
    return Qualifiers::GCNone;

  assert(getLangOpts().ObjC);
  Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();

  // Default behaviour under Objective-C's GC is for ObjC pointers
  // (or pointers to them) to be treated as though they were declared
  // as __strong.
  if (GCAttrs == Qualifiers::GCNone) {
    if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
      return Qualifiers::Strong;
    else if (Ty->isPointerType())
      return getObjCGCAttrKind(Ty->castAs<PointerType>()->getPointeeType());
  } else {
    // It's not valid to set GC attributes on anything that isn't a
    // pointer.
#ifndef NDEBUG
    QualType CT = Ty->getCanonicalTypeInternal();
    while (const auto *AT = dyn_cast<ArrayType>(CT))
      CT = AT->getElementType();
    assert(CT->isAnyPointerType() || CT->isBlockPointerType());
#endif
  }
  return GCAttrs;
}

//===----------------------------------------------------------------------===//
// Type Compatibility Testing
//===----------------------------------------------------------------------===//

/// areCompatVectorTypes - Return true if the two specified vector types are
/// compatible.
static bool areCompatVectorTypes(const VectorType *LHS,
                                 const VectorType *RHS) {
  assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
  return LHS->getElementType() == RHS->getElementType() &&
         LHS->getNumElements() == RHS->getNumElements();
}

/// areCompatMatrixTypes - Return true if the two specified matrix types are
/// compatible.
8764 static bool areCompatMatrixTypes(const ConstantMatrixType *LHS, 8765 const ConstantMatrixType *RHS) { 8766 assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified()); 8767 return LHS->getElementType() == RHS->getElementType() && 8768 LHS->getNumRows() == RHS->getNumRows() && 8769 LHS->getNumColumns() == RHS->getNumColumns(); 8770 } 8771 8772 bool ASTContext::areCompatibleVectorTypes(QualType FirstVec, 8773 QualType SecondVec) { 8774 assert(FirstVec->isVectorType() && "FirstVec should be a vector type"); 8775 assert(SecondVec->isVectorType() && "SecondVec should be a vector type"); 8776 8777 if (hasSameUnqualifiedType(FirstVec, SecondVec)) 8778 return true; 8779 8780 // Treat Neon vector types and most AltiVec vector types as if they are the 8781 // equivalent GCC vector types. 8782 const auto *First = FirstVec->castAs<VectorType>(); 8783 const auto *Second = SecondVec->castAs<VectorType>(); 8784 if (First->getNumElements() == Second->getNumElements() && 8785 hasSameType(First->getElementType(), Second->getElementType()) && 8786 First->getVectorKind() != VectorType::AltiVecPixel && 8787 First->getVectorKind() != VectorType::AltiVecBool && 8788 Second->getVectorKind() != VectorType::AltiVecPixel && 8789 Second->getVectorKind() != VectorType::AltiVecBool && 8790 First->getVectorKind() != VectorType::SveFixedLengthDataVector && 8791 First->getVectorKind() != VectorType::SveFixedLengthPredicateVector && 8792 Second->getVectorKind() != VectorType::SveFixedLengthDataVector && 8793 Second->getVectorKind() != VectorType::SveFixedLengthPredicateVector) 8794 return true; 8795 8796 return false; 8797 } 8798 8799 /// getSVETypeSize - Return SVE vector or predicate register size. 8800 static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) { 8801 assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type"); 8802 return Ty->getKind() == BuiltinType::SveBool 8803 ? (Context.getLangOpts().VScaleMin * 128) / Context.getCharWidth() 8804 : Context.getLangOpts().VScaleMin * 128; 8805 } 8806 8807 bool ASTContext::areCompatibleSveTypes(QualType FirstType, 8808 QualType SecondType) { 8809 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 8810 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 8811 "Expected SVE builtin type and vector type!"); 8812 8813 auto IsValidCast = [this](QualType FirstType, QualType SecondType) { 8814 if (const auto *BT = FirstType->getAs<BuiltinType>()) { 8815 if (const auto *VT = SecondType->getAs<VectorType>()) { 8816 // Predicates have the same representation as uint8 so we also have to 8817 // check the kind to make these types incompatible. 
8818 if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) 8819 return BT->getKind() == BuiltinType::SveBool; 8820 else if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) 8821 return VT->getElementType().getCanonicalType() == 8822 FirstType->getSveEltType(*this); 8823 else if (VT->getVectorKind() == VectorType::GenericVector) 8824 return getTypeSize(SecondType) == getSVETypeSize(*this, BT) && 8825 hasSameType(VT->getElementType(), 8826 getBuiltinVectorTypeInfo(BT).ElementType); 8827 } 8828 } 8829 return false; 8830 }; 8831 8832 return IsValidCast(FirstType, SecondType) || 8833 IsValidCast(SecondType, FirstType); 8834 } 8835 8836 bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType, 8837 QualType SecondType) { 8838 assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) || 8839 (FirstType->isVectorType() && SecondType->isSizelessBuiltinType())) && 8840 "Expected SVE builtin type and vector type!"); 8841 8842 auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) { 8843 const auto *BT = FirstType->getAs<BuiltinType>(); 8844 if (!BT) 8845 return false; 8846 8847 const auto *VecTy = SecondType->getAs<VectorType>(); 8848 if (VecTy && 8849 (VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector || 8850 VecTy->getVectorKind() == VectorType::GenericVector)) { 8851 const LangOptions::LaxVectorConversionKind LVCKind = 8852 getLangOpts().getLaxVectorConversions(); 8853 8854 // Can not convert between sve predicates and sve vectors because of 8855 // different size. 8856 if (BT->getKind() == BuiltinType::SveBool && 8857 VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector) 8858 return false; 8859 8860 // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion. 8861 // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly 8862 // converts to VLAT and VLAT implicitly converts to GNUT." 8863 // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and 8864 // predicates. 8865 if (VecTy->getVectorKind() == VectorType::GenericVector && 8866 getTypeSize(SecondType) != getSVETypeSize(*this, BT)) 8867 return false; 8868 8869 // If -flax-vector-conversions=all is specified, the types are 8870 // certainly compatible. 8871 if (LVCKind == LangOptions::LaxVectorConversionKind::All) 8872 return true; 8873 8874 // If -flax-vector-conversions=integer is specified, the types are 8875 // compatible if the elements are integer types. 8876 if (LVCKind == LangOptions::LaxVectorConversionKind::Integer) 8877 return VecTy->getElementType().getCanonicalType()->isIntegerType() && 8878 FirstType->getSveEltType(*this)->isIntegerType(); 8879 } 8880 8881 return false; 8882 }; 8883 8884 return IsLaxCompatible(FirstType, SecondType) || 8885 IsLaxCompatible(SecondType, FirstType); 8886 } 8887 8888 bool ASTContext::hasDirectOwnershipQualifier(QualType Ty) const { 8889 while (true) { 8890 // __strong id 8891 if (const AttributedType *Attr = dyn_cast<AttributedType>(Ty)) { 8892 if (Attr->getAttrKind() == attr::ObjCOwnership) 8893 return true; 8894 8895 Ty = Attr->getModifiedType(); 8896 8897 // X *__strong (...) 8898 } else if (const ParenType *Paren = dyn_cast<ParenType>(Ty)) { 8899 Ty = Paren->getInnerType(); 8900 8901 // We do not want to look through typedefs, typeof(expr), 8902 // typeof(type), or any other way that the type is somehow 8903 // abstracted. 
8904 } else { 8905 return false; 8906 } 8907 } 8908 } 8909 8910 //===----------------------------------------------------------------------===// 8911 // ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's. 8912 //===----------------------------------------------------------------------===// 8913 8914 /// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the 8915 /// inheritance hierarchy of 'rProto'. 8916 bool 8917 ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto, 8918 ObjCProtocolDecl *rProto) const { 8919 if (declaresSameEntity(lProto, rProto)) 8920 return true; 8921 for (auto *PI : rProto->protocols()) 8922 if (ProtocolCompatibleWithProtocol(lProto, PI)) 8923 return true; 8924 return false; 8925 } 8926 8927 /// ObjCQualifiedClassTypesAreCompatible - compare Class<pr,...> and 8928 /// Class<pr1, ...>. 8929 bool ASTContext::ObjCQualifiedClassTypesAreCompatible( 8930 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs) { 8931 for (auto *lhsProto : lhs->quals()) { 8932 bool match = false; 8933 for (auto *rhsProto : rhs->quals()) { 8934 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) { 8935 match = true; 8936 break; 8937 } 8938 } 8939 if (!match) 8940 return false; 8941 } 8942 return true; 8943 } 8944 8945 /// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an 8946 /// ObjCQualifiedIDType. 8947 bool ASTContext::ObjCQualifiedIdTypesAreCompatible( 8948 const ObjCObjectPointerType *lhs, const ObjCObjectPointerType *rhs, 8949 bool compare) { 8950 // Allow id<P..> and an 'id' in all cases. 8951 if (lhs->isObjCIdType() || rhs->isObjCIdType()) 8952 return true; 8953 8954 // Don't allow id<P..> to convert to Class or Class<P..> in either direction. 8955 if (lhs->isObjCClassType() || lhs->isObjCQualifiedClassType() || 8956 rhs->isObjCClassType() || rhs->isObjCQualifiedClassType()) 8957 return false; 8958 8959 if (lhs->isObjCQualifiedIdType()) { 8960 if (rhs->qual_empty()) { 8961 // If the RHS is a unqualified interface pointer "NSString*", 8962 // make sure we check the class hierarchy. 8963 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 8964 for (auto *I : lhs->quals()) { 8965 // when comparing an id<P> on lhs with a static type on rhs, 8966 // see if static class implements all of id's protocols, directly or 8967 // through its super class and categories. 8968 if (!rhsID->ClassImplementsProtocol(I, true)) 8969 return false; 8970 } 8971 } 8972 // If there are no qualifiers and no interface, we have an 'id'. 8973 return true; 8974 } 8975 // Both the right and left sides have qualifiers. 8976 for (auto *lhsProto : lhs->quals()) { 8977 bool match = false; 8978 8979 // when comparing an id<P> on lhs with a static type on rhs, 8980 // see if static class implements all of id's protocols, directly or 8981 // through its super class and categories. 8982 for (auto *rhsProto : rhs->quals()) { 8983 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 8984 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 8985 match = true; 8986 break; 8987 } 8988 } 8989 // If the RHS is a qualified interface pointer "NSString<P>*", 8990 // make sure we check the class hierarchy. 8991 if (ObjCInterfaceDecl *rhsID = rhs->getInterfaceDecl()) { 8992 for (auto *I : lhs->quals()) { 8993 // when comparing an id<P> on lhs with a static type on rhs, 8994 // see if static class implements all of id's protocols, directly or 8995 // through its super class and categories. 
8996 if (rhsID->ClassImplementsProtocol(I, true)) { 8997 match = true; 8998 break; 8999 } 9000 } 9001 } 9002 if (!match) 9003 return false; 9004 } 9005 9006 return true; 9007 } 9008 9009 assert(rhs->isObjCQualifiedIdType() && "One of the LHS/RHS should be id<x>"); 9010 9011 if (lhs->getInterfaceType()) { 9012 // If both the right and left sides have qualifiers. 9013 for (auto *lhsProto : lhs->quals()) { 9014 bool match = false; 9015 9016 // when comparing an id<P> on rhs with a static type on lhs, 9017 // see if static class implements all of id's protocols, directly or 9018 // through its super class and categories. 9019 // First, lhs protocols in the qualifier list must be found, direct 9020 // or indirect in rhs's qualifier list or it is a mismatch. 9021 for (auto *rhsProto : rhs->quals()) { 9022 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9023 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9024 match = true; 9025 break; 9026 } 9027 } 9028 if (!match) 9029 return false; 9030 } 9031 9032 // Static class's protocols, or its super class or category protocols 9033 // must be found, direct or indirect in rhs's qualifier list or it is a mismatch. 9034 if (ObjCInterfaceDecl *lhsID = lhs->getInterfaceDecl()) { 9035 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols; 9036 CollectInheritedProtocols(lhsID, LHSInheritedProtocols); 9037 // This is rather dubious but matches gcc's behavior. If lhs has 9038 // no type qualifier and its class has no static protocol(s) 9039 // assume that it is mismatch. 9040 if (LHSInheritedProtocols.empty() && lhs->qual_empty()) 9041 return false; 9042 for (auto *lhsProto : LHSInheritedProtocols) { 9043 bool match = false; 9044 for (auto *rhsProto : rhs->quals()) { 9045 if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) || 9046 (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) { 9047 match = true; 9048 break; 9049 } 9050 } 9051 if (!match) 9052 return false; 9053 } 9054 } 9055 return true; 9056 } 9057 return false; 9058 } 9059 9060 /// canAssignObjCInterfaces - Return true if the two interface types are 9061 /// compatible for assignment from RHS to LHS. This handles validation of any 9062 /// protocol qualifiers on the LHS or RHS. 9063 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT, 9064 const ObjCObjectPointerType *RHSOPT) { 9065 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9066 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9067 9068 // If either type represents the built-in 'id' type, return true. 9069 if (LHS->isObjCUnqualifiedId() || RHS->isObjCUnqualifiedId()) 9070 return true; 9071 9072 // Function object that propagates a successful result or handles 9073 // __kindof types. 9074 auto finish = [&](bool succeeded) -> bool { 9075 if (succeeded) 9076 return true; 9077 9078 if (!RHS->isKindOfType()) 9079 return false; 9080 9081 // Strip off __kindof and protocol qualifiers, then check whether 9082 // we can assign the other way. 9083 return canAssignObjCInterfaces(RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9084 LHSOPT->stripObjCKindOfTypeAndQuals(*this)); 9085 }; 9086 9087 // Casts from or to id<P> are allowed when the other side has compatible 9088 // protocols. 9089 if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId()) { 9090 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false)); 9091 } 9092 9093 // Verify protocol compatibility for casts from Class<P1> to Class<P2>. 
9094 if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass()) { 9095 return finish(ObjCQualifiedClassTypesAreCompatible(LHSOPT, RHSOPT)); 9096 } 9097 9098 // Casts from Class to Class<Foo>, or vice-versa, are allowed. 9099 if (LHS->isObjCClass() && RHS->isObjCClass()) { 9100 return true; 9101 } 9102 9103 // If we have 2 user-defined types, fall into that path. 9104 if (LHS->getInterface() && RHS->getInterface()) { 9105 return finish(canAssignObjCInterfaces(LHS, RHS)); 9106 } 9107 9108 return false; 9109 } 9110 9111 /// canAssignObjCInterfacesInBlockPointer - This routine is specifically written 9112 /// for providing type-safety for objective-c pointers used to pass/return 9113 /// arguments in block literals. When passed as arguments, passing 'A*' where 9114 /// 'id' is expected is not OK. Passing 'Sub *" where 'Super *" is expected is 9115 /// not OK. For the return type, the opposite is not OK. 9116 bool ASTContext::canAssignObjCInterfacesInBlockPointer( 9117 const ObjCObjectPointerType *LHSOPT, 9118 const ObjCObjectPointerType *RHSOPT, 9119 bool BlockReturnType) { 9120 9121 // Function object that propagates a successful result or handles 9122 // __kindof types. 9123 auto finish = [&](bool succeeded) -> bool { 9124 if (succeeded) 9125 return true; 9126 9127 const ObjCObjectPointerType *Expected = BlockReturnType ? RHSOPT : LHSOPT; 9128 if (!Expected->isKindOfType()) 9129 return false; 9130 9131 // Strip off __kindof and protocol qualifiers, then check whether 9132 // we can assign the other way. 9133 return canAssignObjCInterfacesInBlockPointer( 9134 RHSOPT->stripObjCKindOfTypeAndQuals(*this), 9135 LHSOPT->stripObjCKindOfTypeAndQuals(*this), 9136 BlockReturnType); 9137 }; 9138 9139 if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType()) 9140 return true; 9141 9142 if (LHSOPT->isObjCBuiltinType()) { 9143 return finish(RHSOPT->isObjCBuiltinType() || 9144 RHSOPT->isObjCQualifiedIdType()); 9145 } 9146 9147 if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType()) { 9148 if (getLangOpts().CompatibilityQualifiedIdBlockParamTypeChecking) 9149 // Use for block parameters previous type checking for compatibility. 9150 return finish(ObjCQualifiedIdTypesAreCompatible(LHSOPT, RHSOPT, false) || 9151 // Or corrected type checking as in non-compat mode. 9152 (!BlockReturnType && 9153 ObjCQualifiedIdTypesAreCompatible(RHSOPT, LHSOPT, false))); 9154 else 9155 return finish(ObjCQualifiedIdTypesAreCompatible( 9156 (BlockReturnType ? LHSOPT : RHSOPT), 9157 (BlockReturnType ? RHSOPT : LHSOPT), false)); 9158 } 9159 9160 const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType(); 9161 const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType(); 9162 if (LHS && RHS) { // We have 2 user-defined types. 9163 if (LHS != RHS) { 9164 if (LHS->getDecl()->isSuperClassOf(RHS->getDecl())) 9165 return finish(BlockReturnType); 9166 if (RHS->getDecl()->isSuperClassOf(LHS->getDecl())) 9167 return finish(!BlockReturnType); 9168 } 9169 else 9170 return true; 9171 } 9172 return false; 9173 } 9174 9175 /// Comparison routine for Objective-C protocols to be used with 9176 /// llvm::array_pod_sort. 9177 static int compareObjCProtocolsByName(ObjCProtocolDecl * const *lhs, 9178 ObjCProtocolDecl * const *rhs) { 9179 return (*lhs)->getName().compare((*rhs)->getName()); 9180 } 9181 9182 /// getIntersectionOfProtocols - This routine finds the intersection of set 9183 /// of protocols inherited from two distinct objective-c pointer objects with 9184 /// the given common base. 
9185 /// It is used to build composite qualifier list of the composite type of 9186 /// the conditional expression involving two objective-c pointer objects. 9187 static 9188 void getIntersectionOfProtocols(ASTContext &Context, 9189 const ObjCInterfaceDecl *CommonBase, 9190 const ObjCObjectPointerType *LHSOPT, 9191 const ObjCObjectPointerType *RHSOPT, 9192 SmallVectorImpl<ObjCProtocolDecl *> &IntersectionSet) { 9193 9194 const ObjCObjectType* LHS = LHSOPT->getObjectType(); 9195 const ObjCObjectType* RHS = RHSOPT->getObjectType(); 9196 assert(LHS->getInterface() && "LHS must have an interface base"); 9197 assert(RHS->getInterface() && "RHS must have an interface base"); 9198 9199 // Add all of the protocols for the LHS. 9200 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSProtocolSet; 9201 9202 // Start with the protocol qualifiers. 9203 for (auto proto : LHS->quals()) { 9204 Context.CollectInheritedProtocols(proto, LHSProtocolSet); 9205 } 9206 9207 // Also add the protocols associated with the LHS interface. 9208 Context.CollectInheritedProtocols(LHS->getInterface(), LHSProtocolSet); 9209 9210 // Add all of the protocols for the RHS. 9211 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSProtocolSet; 9212 9213 // Start with the protocol qualifiers. 9214 for (auto proto : RHS->quals()) { 9215 Context.CollectInheritedProtocols(proto, RHSProtocolSet); 9216 } 9217 9218 // Also add the protocols associated with the RHS interface. 9219 Context.CollectInheritedProtocols(RHS->getInterface(), RHSProtocolSet); 9220 9221 // Compute the intersection of the collected protocol sets. 9222 for (auto proto : LHSProtocolSet) { 9223 if (RHSProtocolSet.count(proto)) 9224 IntersectionSet.push_back(proto); 9225 } 9226 9227 // Compute the set of protocols that is implied by either the common type or 9228 // the protocols within the intersection. 9229 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ImpliedProtocols; 9230 Context.CollectInheritedProtocols(CommonBase, ImpliedProtocols); 9231 9232 // Remove any implied protocols from the list of inherited protocols. 9233 if (!ImpliedProtocols.empty()) { 9234 llvm::erase_if(IntersectionSet, [&](ObjCProtocolDecl *proto) -> bool { 9235 return ImpliedProtocols.count(proto) > 0; 9236 }); 9237 } 9238 9239 // Sort the remaining protocols by name. 9240 llvm::array_pod_sort(IntersectionSet.begin(), IntersectionSet.end(), 9241 compareObjCProtocolsByName); 9242 } 9243 9244 /// Determine whether the first type is a subtype of the second. 9245 static bool canAssignObjCObjectTypes(ASTContext &ctx, QualType lhs, 9246 QualType rhs) { 9247 // Common case: two object pointers. 9248 const auto *lhsOPT = lhs->getAs<ObjCObjectPointerType>(); 9249 const auto *rhsOPT = rhs->getAs<ObjCObjectPointerType>(); 9250 if (lhsOPT && rhsOPT) 9251 return ctx.canAssignObjCInterfaces(lhsOPT, rhsOPT); 9252 9253 // Two block pointers. 9254 const auto *lhsBlock = lhs->getAs<BlockPointerType>(); 9255 const auto *rhsBlock = rhs->getAs<BlockPointerType>(); 9256 if (lhsBlock && rhsBlock) 9257 return ctx.typesAreBlockPointerCompatible(lhs, rhs); 9258 9259 // If either is an unqualified 'id' and the other is a block, it's 9260 // acceptable. 9261 if ((lhsOPT && lhsOPT->isObjCIdType() && rhsBlock) || 9262 (rhsOPT && rhsOPT->isObjCIdType() && lhsBlock)) 9263 return true; 9264 9265 return false; 9266 } 9267 9268 // Check that the given Objective-C type argument lists are equivalent. 
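// For an invariant type parameter the corresponding arguments must be the
// same type (modulo __kindof when stripKindOf is set); covariant and
// contravariant parameters instead accept assignment-compatible arguments in
// the corresponding direction (see canAssignObjCObjectTypes above).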
9269 static bool sameObjCTypeArgs(ASTContext &ctx, 9270 const ObjCInterfaceDecl *iface, 9271 ArrayRef<QualType> lhsArgs, 9272 ArrayRef<QualType> rhsArgs, 9273 bool stripKindOf) { 9274 if (lhsArgs.size() != rhsArgs.size()) 9275 return false; 9276 9277 ObjCTypeParamList *typeParams = iface->getTypeParamList(); 9278 for (unsigned i = 0, n = lhsArgs.size(); i != n; ++i) { 9279 if (ctx.hasSameType(lhsArgs[i], rhsArgs[i])) 9280 continue; 9281 9282 switch (typeParams->begin()[i]->getVariance()) { 9283 case ObjCTypeParamVariance::Invariant: 9284 if (!stripKindOf || 9285 !ctx.hasSameType(lhsArgs[i].stripObjCKindOfType(ctx), 9286 rhsArgs[i].stripObjCKindOfType(ctx))) { 9287 return false; 9288 } 9289 break; 9290 9291 case ObjCTypeParamVariance::Covariant: 9292 if (!canAssignObjCObjectTypes(ctx, lhsArgs[i], rhsArgs[i])) 9293 return false; 9294 break; 9295 9296 case ObjCTypeParamVariance::Contravariant: 9297 if (!canAssignObjCObjectTypes(ctx, rhsArgs[i], lhsArgs[i])) 9298 return false; 9299 break; 9300 } 9301 } 9302 9303 return true; 9304 } 9305 9306 QualType ASTContext::areCommonBaseCompatible( 9307 const ObjCObjectPointerType *Lptr, 9308 const ObjCObjectPointerType *Rptr) { 9309 const ObjCObjectType *LHS = Lptr->getObjectType(); 9310 const ObjCObjectType *RHS = Rptr->getObjectType(); 9311 const ObjCInterfaceDecl* LDecl = LHS->getInterface(); 9312 const ObjCInterfaceDecl* RDecl = RHS->getInterface(); 9313 9314 if (!LDecl || !RDecl) 9315 return {}; 9316 9317 // When either LHS or RHS is a kindof type, we should return a kindof type. 9318 // For example, for common base of kindof(ASub1) and kindof(ASub2), we return 9319 // kindof(A). 9320 bool anyKindOf = LHS->isKindOfType() || RHS->isKindOfType(); 9321 9322 // Follow the left-hand side up the class hierarchy until we either hit a 9323 // root or find the RHS. Record the ancestors in case we don't find it. 9324 llvm::SmallDenseMap<const ObjCInterfaceDecl *, const ObjCObjectType *, 4> 9325 LHSAncestors; 9326 while (true) { 9327 // Record this ancestor. We'll need this if the common type isn't in the 9328 // path from the LHS to the root. 9329 LHSAncestors[LHS->getInterface()->getCanonicalDecl()] = LHS; 9330 9331 if (declaresSameEntity(LHS->getInterface(), RDecl)) { 9332 // Get the type arguments. 9333 ArrayRef<QualType> LHSTypeArgs = LHS->getTypeArgsAsWritten(); 9334 bool anyChanges = false; 9335 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9336 // Both have type arguments, compare them. 9337 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9338 LHS->getTypeArgs(), RHS->getTypeArgs(), 9339 /*stripKindOf=*/true)) 9340 return {}; 9341 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9342 // If only one has type arguments, the result will not have type 9343 // arguments. 9344 LHSTypeArgs = {}; 9345 anyChanges = true; 9346 } 9347 9348 // Compute the intersection of protocols. 9349 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9350 getIntersectionOfProtocols(*this, LHS->getInterface(), Lptr, Rptr, 9351 Protocols); 9352 if (!Protocols.empty()) 9353 anyChanges = true; 9354 9355 // If anything in the LHS will have changed, build a new result type. 9356 // If we need to return a kindof type but LHS is not a kindof type, we 9357 // build a new result type. 
9358 if (anyChanges || LHS->isKindOfType() != anyKindOf) { 9359 QualType Result = getObjCInterfaceType(LHS->getInterface()); 9360 Result = getObjCObjectType(Result, LHSTypeArgs, Protocols, 9361 anyKindOf || LHS->isKindOfType()); 9362 return getObjCObjectPointerType(Result); 9363 } 9364 9365 return getObjCObjectPointerType(QualType(LHS, 0)); 9366 } 9367 9368 // Find the superclass. 9369 QualType LHSSuperType = LHS->getSuperClassType(); 9370 if (LHSSuperType.isNull()) 9371 break; 9372 9373 LHS = LHSSuperType->castAs<ObjCObjectType>(); 9374 } 9375 9376 // We didn't find anything by following the LHS to its root; now check 9377 // the RHS against the cached set of ancestors. 9378 while (true) { 9379 auto KnownLHS = LHSAncestors.find(RHS->getInterface()->getCanonicalDecl()); 9380 if (KnownLHS != LHSAncestors.end()) { 9381 LHS = KnownLHS->second; 9382 9383 // Get the type arguments. 9384 ArrayRef<QualType> RHSTypeArgs = RHS->getTypeArgsAsWritten(); 9385 bool anyChanges = false; 9386 if (LHS->isSpecialized() && RHS->isSpecialized()) { 9387 // Both have type arguments, compare them. 9388 if (!sameObjCTypeArgs(*this, LHS->getInterface(), 9389 LHS->getTypeArgs(), RHS->getTypeArgs(), 9390 /*stripKindOf=*/true)) 9391 return {}; 9392 } else if (LHS->isSpecialized() != RHS->isSpecialized()) { 9393 // If only one has type arguments, the result will not have type 9394 // arguments. 9395 RHSTypeArgs = {}; 9396 anyChanges = true; 9397 } 9398 9399 // Compute the intersection of protocols. 9400 SmallVector<ObjCProtocolDecl *, 8> Protocols; 9401 getIntersectionOfProtocols(*this, RHS->getInterface(), Lptr, Rptr, 9402 Protocols); 9403 if (!Protocols.empty()) 9404 anyChanges = true; 9405 9406 // If we need to return a kindof type but RHS is not a kindof type, we 9407 // build a new result type. 9408 if (anyChanges || RHS->isKindOfType() != anyKindOf) { 9409 QualType Result = getObjCInterfaceType(RHS->getInterface()); 9410 Result = getObjCObjectType(Result, RHSTypeArgs, Protocols, 9411 anyKindOf || RHS->isKindOfType()); 9412 return getObjCObjectPointerType(Result); 9413 } 9414 9415 return getObjCObjectPointerType(QualType(RHS, 0)); 9416 } 9417 9418 // Find the superclass of the RHS. 9419 QualType RHSSuperType = RHS->getSuperClassType(); 9420 if (RHSSuperType.isNull()) 9421 break; 9422 9423 RHS = RHSSuperType->castAs<ObjCObjectType>(); 9424 } 9425 9426 return {}; 9427 } 9428 9429 bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS, 9430 const ObjCObjectType *RHS) { 9431 assert(LHS->getInterface() && "LHS is not an interface type"); 9432 assert(RHS->getInterface() && "RHS is not an interface type"); 9433 9434 // Verify that the base decls are compatible: the RHS must be a subclass of 9435 // the LHS. 9436 ObjCInterfaceDecl *LHSInterface = LHS->getInterface(); 9437 bool IsSuperClass = LHSInterface->isSuperClassOf(RHS->getInterface()); 9438 if (!IsSuperClass) 9439 return false; 9440 9441 // If the LHS has protocol qualifiers, determine whether all of them are 9442 // satisfied by the RHS (i.e., the RHS has a superset of the protocols in the 9443 // LHS). 9444 if (LHS->getNumProtocols() > 0) { 9445 // OK if conversion of LHS to SuperClass results in narrowing of types 9446 // ; i.e., SuperClass may implement at least one of the protocols 9447 // in LHS's protocol list. Example, SuperObj<P1> = lhs<P1,P2> is ok. 9448 // But not SuperObj<P1,P2,P3> = lhs<P1,P2>. 
9449 llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols; 9450 CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols); 9451 // Also, if RHS has explicit qualifiers, include them for comparing with LHS's 9452 // qualifiers. 9453 for (auto *RHSPI : RHS->quals()) 9454 CollectInheritedProtocols(RHSPI, SuperClassInheritedProtocols); 9455 // If there are no protocols associated with RHS, it is not a match. 9456 if (SuperClassInheritedProtocols.empty()) 9457 return false; 9458 9459 for (const auto *LHSProto : LHS->quals()) { 9460 bool SuperImplementsProtocol = false; 9461 for (auto *SuperClassProto : SuperClassInheritedProtocols) 9462 if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) { 9463 SuperImplementsProtocol = true; 9464 break; 9465 } 9466 if (!SuperImplementsProtocol) 9467 return false; 9468 } 9469 } 9470 9471 // If the LHS is specialized, we may need to check type arguments. 9472 if (LHS->isSpecialized()) { 9473 // Follow the superclass chain until we've matched the LHS class in the 9474 // hierarchy. This substitutes type arguments through. 9475 const ObjCObjectType *RHSSuper = RHS; 9476 while (!declaresSameEntity(RHSSuper->getInterface(), LHSInterface)) 9477 RHSSuper = RHSSuper->getSuperClassType()->castAs<ObjCObjectType>(); 9478 9479 // If the RHS is specialized, compare type arguments. 9480 if (RHSSuper->isSpecialized() && 9481 !sameObjCTypeArgs(*this, LHS->getInterface(), 9482 LHS->getTypeArgs(), RHSSuper->getTypeArgs(), 9483 /*stripKindOf=*/true)) { 9484 return false; 9485 } 9486 } 9487 9488 return true; 9489 } 9490 9491 bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) { 9492 // get the "pointed to" types 9493 const auto *LHSOPT = LHS->getAs<ObjCObjectPointerType>(); 9494 const auto *RHSOPT = RHS->getAs<ObjCObjectPointerType>(); 9495 9496 if (!LHSOPT || !RHSOPT) 9497 return false; 9498 9499 return canAssignObjCInterfaces(LHSOPT, RHSOPT) || 9500 canAssignObjCInterfaces(RHSOPT, LHSOPT); 9501 } 9502 9503 bool ASTContext::canBindObjCObjectType(QualType To, QualType From) { 9504 return canAssignObjCInterfaces( 9505 getObjCObjectPointerType(To)->castAs<ObjCObjectPointerType>(), 9506 getObjCObjectPointerType(From)->castAs<ObjCObjectPointerType>()); 9507 } 9508 9509 /// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible, 9510 /// both shall have the identically qualified version of a compatible type. 9511 /// C99 6.2.7p1: Two types have compatible types if their types are the 9512 /// same. See 6.7.[2,3,5] for additional rules.
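/// For example, in C 'int[]' is compatible with 'int[10]', and an unprototyped
/// 'int f()' is compatible with 'int f(int)', but 'int' and 'long' are never
/// compatible, even when they have the same width.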
9513 bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS, 9514 bool CompareUnqualified) { 9515 if (getLangOpts().CPlusPlus) 9516 return hasSameType(LHS, RHS); 9517 9518 return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull(); 9519 } 9520 9521 bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) { 9522 return typesAreCompatible(LHS, RHS); 9523 } 9524 9525 bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) { 9526 return !mergeTypes(LHS, RHS, true).isNull(); 9527 } 9528 9529 /// mergeTransparentUnionType - if T is a transparent union type and a member 9530 /// of T is compatible with SubType, return the merged type, else return 9531 /// QualType() 9532 QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType, 9533 bool OfBlockPointer, 9534 bool Unqualified) { 9535 if (const RecordType *UT = T->getAsUnionType()) { 9536 RecordDecl *UD = UT->getDecl(); 9537 if (UD->hasAttr<TransparentUnionAttr>()) { 9538 for (const auto *I : UD->fields()) { 9539 QualType ET = I->getType().getUnqualifiedType(); 9540 QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified); 9541 if (!MT.isNull()) 9542 return MT; 9543 } 9544 } 9545 } 9546 9547 return {}; 9548 } 9549 9550 /// mergeFunctionParameterTypes - merge two types which appear as function 9551 /// parameter types 9552 QualType ASTContext::mergeFunctionParameterTypes(QualType lhs, QualType rhs, 9553 bool OfBlockPointer, 9554 bool Unqualified) { 9555 // GNU extension: two types are compatible if they appear as a function 9556 // argument, one of the types is a transparent union type and the other 9557 // type is compatible with a union member 9558 QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer, 9559 Unqualified); 9560 if (!lmerge.isNull()) 9561 return lmerge; 9562 9563 QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer, 9564 Unqualified); 9565 if (!rmerge.isNull()) 9566 return rmerge; 9567 9568 return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified); 9569 } 9570 9571 QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs, 9572 bool OfBlockPointer, bool Unqualified, 9573 bool AllowCXX) { 9574 const auto *lbase = lhs->castAs<FunctionType>(); 9575 const auto *rbase = rhs->castAs<FunctionType>(); 9576 const auto *lproto = dyn_cast<FunctionProtoType>(lbase); 9577 const auto *rproto = dyn_cast<FunctionProtoType>(rbase); 9578 bool allLTypes = true; 9579 bool allRTypes = true; 9580 9581 // Check return type 9582 QualType retType; 9583 if (OfBlockPointer) { 9584 QualType RHS = rbase->getReturnType(); 9585 QualType LHS = lbase->getReturnType(); 9586 bool UnqualifiedResult = Unqualified; 9587 if (!UnqualifiedResult) 9588 UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers()); 9589 retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true); 9590 } 9591 else 9592 retType = mergeTypes(lbase->getReturnType(), rbase->getReturnType(), false, 9593 Unqualified); 9594 if (retType.isNull()) 9595 return {}; 9596 9597 if (Unqualified) 9598 retType = retType.getUnqualifiedType(); 9599 9600 CanQualType LRetType = getCanonicalType(lbase->getReturnType()); 9601 CanQualType RRetType = getCanonicalType(rbase->getReturnType()); 9602 if (Unqualified) { 9603 LRetType = LRetType.getUnqualifiedType(); 9604 RRetType = RRetType.getUnqualifiedType(); 9605 } 9606 9607 if (getCanonicalType(retType) != LRetType) 9608 allLTypes = false; 9609 if (getCanonicalType(retType) != RRetType) 9610 allRTypes = false; 9611 9612 // FIXME: double check this 
9613 // FIXME: should we error if lbase->getRegParmAttr() != 0 && 9614 // rbase->getRegParmAttr() != 0 && 9615 // lbase->getRegParmAttr() != rbase->getRegParmAttr()? 9616 FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo(); 9617 FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo(); 9618 9619 // Compatible functions must have compatible calling conventions 9620 if (lbaseInfo.getCC() != rbaseInfo.getCC()) 9621 return {}; 9622 9623 // Regparm is part of the calling convention. 9624 if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm()) 9625 return {}; 9626 if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm()) 9627 return {}; 9628 9629 if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult()) 9630 return {}; 9631 if (lbaseInfo.getNoCallerSavedRegs() != rbaseInfo.getNoCallerSavedRegs()) 9632 return {}; 9633 if (lbaseInfo.getNoCfCheck() != rbaseInfo.getNoCfCheck()) 9634 return {}; 9635 9636 // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'. 9637 bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn(); 9638 9639 if (lbaseInfo.getNoReturn() != NoReturn) 9640 allLTypes = false; 9641 if (rbaseInfo.getNoReturn() != NoReturn) 9642 allRTypes = false; 9643 9644 FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn); 9645 9646 if (lproto && rproto) { // two C99 style function prototypes 9647 assert((AllowCXX || 9648 (!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec())) && 9649 "C++ shouldn't be here"); 9650 // Compatible functions must have the same number of parameters 9651 if (lproto->getNumParams() != rproto->getNumParams()) 9652 return {}; 9653 9654 // Variadic and non-variadic functions aren't compatible 9655 if (lproto->isVariadic() != rproto->isVariadic()) 9656 return {}; 9657 9658 if (lproto->getMethodQuals() != rproto->getMethodQuals()) 9659 return {}; 9660 9661 SmallVector<FunctionProtoType::ExtParameterInfo, 4> newParamInfos; 9662 bool canUseLeft, canUseRight; 9663 if (!mergeExtParameterInfo(lproto, rproto, canUseLeft, canUseRight, 9664 newParamInfos)) 9665 return {}; 9666 9667 if (!canUseLeft) 9668 allLTypes = false; 9669 if (!canUseRight) 9670 allRTypes = false; 9671 9672 // Check parameter type compatibility 9673 SmallVector<QualType, 10> types; 9674 for (unsigned i = 0, n = lproto->getNumParams(); i < n; i++) { 9675 QualType lParamType = lproto->getParamType(i).getUnqualifiedType(); 9676 QualType rParamType = rproto->getParamType(i).getUnqualifiedType(); 9677 QualType paramType = mergeFunctionParameterTypes( 9678 lParamType, rParamType, OfBlockPointer, Unqualified); 9679 if (paramType.isNull()) 9680 return {}; 9681 9682 if (Unqualified) 9683 paramType = paramType.getUnqualifiedType(); 9684 9685 types.push_back(paramType); 9686 if (Unqualified) { 9687 lParamType = lParamType.getUnqualifiedType(); 9688 rParamType = rParamType.getUnqualifiedType(); 9689 } 9690 9691 if (getCanonicalType(paramType) != getCanonicalType(lParamType)) 9692 allLTypes = false; 9693 if (getCanonicalType(paramType) != getCanonicalType(rParamType)) 9694 allRTypes = false; 9695 } 9696 9697 if (allLTypes) return lhs; 9698 if (allRTypes) return rhs; 9699 9700 FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo(); 9701 EPI.ExtInfo = einfo; 9702 EPI.ExtParameterInfos = 9703 newParamInfos.empty() ? nullptr : newParamInfos.data(); 9704 return getFunctionType(retType, types, EPI); 9705 } 9706 9707 if (lproto) allRTypes = false; 9708 if (rproto) allLTypes = false; 9709 9710 const FunctionProtoType *proto = lproto ? 
lproto : rproto; 9711 if (proto) { 9712 assert((AllowCXX || !proto->hasExceptionSpec()) && "C++ shouldn't be here"); 9713 if (proto->isVariadic()) 9714 return {}; 9715 // Check that the types are compatible with the types that 9716 // would result from default argument promotions (C99 6.7.5.3p15). 9717 // The only types actually affected are promotable integer 9718 // types and floats, which would be passed as a different 9719 // type depending on whether the prototype is visible. 9720 for (unsigned i = 0, n = proto->getNumParams(); i < n; ++i) { 9721 QualType paramTy = proto->getParamType(i); 9722 9723 // Look at the converted type of enum types, since that is the type used 9724 // to pass enum values. 9725 if (const auto *Enum = paramTy->getAs<EnumType>()) { 9726 paramTy = Enum->getDecl()->getIntegerType(); 9727 if (paramTy.isNull()) 9728 return {}; 9729 } 9730 9731 if (paramTy->isPromotableIntegerType() || 9732 getCanonicalType(paramTy).getUnqualifiedType() == FloatTy) 9733 return {}; 9734 } 9735 9736 if (allLTypes) return lhs; 9737 if (allRTypes) return rhs; 9738 9739 FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo(); 9740 EPI.ExtInfo = einfo; 9741 return getFunctionType(retType, proto->getParamTypes(), EPI); 9742 } 9743 9744 if (allLTypes) return lhs; 9745 if (allRTypes) return rhs; 9746 return getFunctionNoProtoType(retType, einfo); 9747 } 9748 9749 /// Given that we have an enum type and a non-enum type, try to merge them. 9750 static QualType mergeEnumWithInteger(ASTContext &Context, const EnumType *ET, 9751 QualType other, bool isBlockReturnType) { 9752 // C99 6.7.2.2p4: Each enumerated type shall be compatible with char, 9753 // a signed integer type, or an unsigned integer type. 9754 // Compatibility is based on the underlying type, not the promotion 9755 // type. 9756 QualType underlyingType = ET->getDecl()->getIntegerType(); 9757 if (underlyingType.isNull()) 9758 return {}; 9759 if (Context.hasSameType(underlyingType, other)) 9760 return other; 9761 9762 // In block return types, we're more permissive and accept any 9763 // integral type of the same size. 9764 if (isBlockReturnType && other->isIntegerType() && 9765 Context.getTypeSize(underlyingType) == Context.getTypeSize(other)) 9766 return other; 9767 9768 return {}; 9769 } 9770 9771 QualType ASTContext::mergeTypes(QualType LHS, QualType RHS, 9772 bool OfBlockPointer, 9773 bool Unqualified, bool BlockReturnType) { 9774 // For C++ we will not reach this code with reference types (see below), 9775 // for OpenMP variant call overloading we might. 9776 // 9777 // C++ [expr]: If an expression initially has the type "reference to T", the 9778 // type is adjusted to "T" prior to any further analysis, the expression 9779 // designates the object or function denoted by the reference, and the 9780 // expression is an lvalue unless the reference is an rvalue reference and 9781 // the expression is a function call (possibly inside parentheses). 
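// For OpenMP variant overloading, two reference types of the same kind are
// merged through their pointee types below (note that the merged pointee, not
// a rebuilt reference type, is what gets returned); any other occurrence of a
// reference type makes the merge fail.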
9782 if (LangOpts.OpenMP && LHS->getAs<ReferenceType>() && 9783 RHS->getAs<ReferenceType>() && LHS->getTypeClass() == RHS->getTypeClass()) 9784 return mergeTypes(LHS->getAs<ReferenceType>()->getPointeeType(), 9785 RHS->getAs<ReferenceType>()->getPointeeType(), 9786 OfBlockPointer, Unqualified, BlockReturnType); 9787 if (LHS->getAs<ReferenceType>() || RHS->getAs<ReferenceType>()) 9788 return {}; 9789 9790 if (Unqualified) { 9791 LHS = LHS.getUnqualifiedType(); 9792 RHS = RHS.getUnqualifiedType(); 9793 } 9794 9795 QualType LHSCan = getCanonicalType(LHS), 9796 RHSCan = getCanonicalType(RHS); 9797 9798 // If two types are identical, they are compatible. 9799 if (LHSCan == RHSCan) 9800 return LHS; 9801 9802 // If the qualifiers are different, the types aren't compatible... mostly. 9803 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 9804 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 9805 if (LQuals != RQuals) { 9806 // If any of these qualifiers are different, we have a type 9807 // mismatch. 9808 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 9809 LQuals.getAddressSpace() != RQuals.getAddressSpace() || 9810 LQuals.getObjCLifetime() != RQuals.getObjCLifetime() || 9811 LQuals.hasUnaligned() != RQuals.hasUnaligned()) 9812 return {}; 9813 9814 // Exactly one GC qualifier difference is allowed: __strong is 9815 // okay if the other type has no GC qualifier but is an Objective 9816 // C object pointer (i.e. implicitly strong by default). We fix 9817 // this by pretending that the unqualified type was actually 9818 // qualified __strong. 9819 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 9820 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 9821 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 9822 9823 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 9824 return {}; 9825 9826 if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) { 9827 return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong)); 9828 } 9829 if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) { 9830 return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS); 9831 } 9832 return {}; 9833 } 9834 9835 // Okay, qualifiers are equal. 9836 9837 Type::TypeClass LHSClass = LHSCan->getTypeClass(); 9838 Type::TypeClass RHSClass = RHSCan->getTypeClass(); 9839 9840 // We want to consider the two function types to be the same for these 9841 // comparisons, just force one to the other. 9842 if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto; 9843 if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto; 9844 9845 // Same as above for arrays 9846 if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray) 9847 LHSClass = Type::ConstantArray; 9848 if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray) 9849 RHSClass = Type::ConstantArray; 9850 9851 // ObjCInterfaces are just specialized ObjCObjects. 9852 if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject; 9853 if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject; 9854 9855 // Canonicalize ExtVector -> Vector. 9856 if (LHSClass == Type::ExtVector) LHSClass = Type::Vector; 9857 if (RHSClass == Type::ExtVector) RHSClass = Type::Vector; 9858 9859 // If the canonical type classes don't match. 9860 if (LHSClass != RHSClass) { 9861 // Note that we only have special rules for turning block enum 9862 // returns into block int returns, not vice-versa. 
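// For example, an 'enum E' whose underlying type is 'unsigned int' merges with
// 'unsigned int', and a block returning 'enum E' additionally merges with a
// block returning any other integer type of the same width.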
9863 if (const auto *ETy = LHS->getAs<EnumType>()) { 9864 return mergeEnumWithInteger(*this, ETy, RHS, false); 9865 } 9866 if (const EnumType* ETy = RHS->getAs<EnumType>()) { 9867 return mergeEnumWithInteger(*this, ETy, LHS, BlockReturnType); 9868 } 9869 // allow block pointer type to match an 'id' type. 9870 if (OfBlockPointer && !BlockReturnType) { 9871 if (LHS->isObjCIdType() && RHS->isBlockPointerType()) 9872 return LHS; 9873 if (RHS->isObjCIdType() && LHS->isBlockPointerType()) 9874 return RHS; 9875 } 9876 9877 return {}; 9878 } 9879 9880 // The canonical type classes match. 9881 switch (LHSClass) { 9882 #define TYPE(Class, Base) 9883 #define ABSTRACT_TYPE(Class, Base) 9884 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 9885 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 9886 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 9887 #include "clang/AST/TypeNodes.inc" 9888 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 9889 9890 case Type::Auto: 9891 case Type::DeducedTemplateSpecialization: 9892 case Type::LValueReference: 9893 case Type::RValueReference: 9894 case Type::MemberPointer: 9895 llvm_unreachable("C++ should never be in mergeTypes"); 9896 9897 case Type::ObjCInterface: 9898 case Type::IncompleteArray: 9899 case Type::VariableArray: 9900 case Type::FunctionProto: 9901 case Type::ExtVector: 9902 llvm_unreachable("Types are eliminated above"); 9903 9904 case Type::Pointer: 9905 { 9906 // Merge two pointer types, while trying to preserve typedef info 9907 QualType LHSPointee = LHS->castAs<PointerType>()->getPointeeType(); 9908 QualType RHSPointee = RHS->castAs<PointerType>()->getPointeeType(); 9909 if (Unqualified) { 9910 LHSPointee = LHSPointee.getUnqualifiedType(); 9911 RHSPointee = RHSPointee.getUnqualifiedType(); 9912 } 9913 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false, 9914 Unqualified); 9915 if (ResultType.isNull()) 9916 return {}; 9917 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 9918 return LHS; 9919 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 9920 return RHS; 9921 return getPointerType(ResultType); 9922 } 9923 case Type::BlockPointer: 9924 { 9925 // Merge two block pointer types, while trying to preserve typedef info 9926 QualType LHSPointee = LHS->castAs<BlockPointerType>()->getPointeeType(); 9927 QualType RHSPointee = RHS->castAs<BlockPointerType>()->getPointeeType(); 9928 if (Unqualified) { 9929 LHSPointee = LHSPointee.getUnqualifiedType(); 9930 RHSPointee = RHSPointee.getUnqualifiedType(); 9931 } 9932 if (getLangOpts().OpenCL) { 9933 Qualifiers LHSPteeQual = LHSPointee.getQualifiers(); 9934 Qualifiers RHSPteeQual = RHSPointee.getQualifiers(); 9935 // Blocks can't be an expression in a ternary operator (OpenCL v2.0 9936 // 6.12.5) thus the following check is asymmetric. 
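// For example, a '__generic'-qualified pointee on the LHS accepts a
// '__private'-qualified pointee from the RHS, but not the other way around.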
9937 if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual)) 9938 return {}; 9939 LHSPteeQual.removeAddressSpace(); 9940 RHSPteeQual.removeAddressSpace(); 9941 LHSPointee = 9942 QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue()); 9943 RHSPointee = 9944 QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue()); 9945 } 9946 QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer, 9947 Unqualified); 9948 if (ResultType.isNull()) 9949 return {}; 9950 if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType)) 9951 return LHS; 9952 if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType)) 9953 return RHS; 9954 return getBlockPointerType(ResultType); 9955 } 9956 case Type::Atomic: 9957 { 9958 // Merge two pointer types, while trying to preserve typedef info 9959 QualType LHSValue = LHS->castAs<AtomicType>()->getValueType(); 9960 QualType RHSValue = RHS->castAs<AtomicType>()->getValueType(); 9961 if (Unqualified) { 9962 LHSValue = LHSValue.getUnqualifiedType(); 9963 RHSValue = RHSValue.getUnqualifiedType(); 9964 } 9965 QualType ResultType = mergeTypes(LHSValue, RHSValue, false, 9966 Unqualified); 9967 if (ResultType.isNull()) 9968 return {}; 9969 if (getCanonicalType(LHSValue) == getCanonicalType(ResultType)) 9970 return LHS; 9971 if (getCanonicalType(RHSValue) == getCanonicalType(ResultType)) 9972 return RHS; 9973 return getAtomicType(ResultType); 9974 } 9975 case Type::ConstantArray: 9976 { 9977 const ConstantArrayType* LCAT = getAsConstantArrayType(LHS); 9978 const ConstantArrayType* RCAT = getAsConstantArrayType(RHS); 9979 if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize()) 9980 return {}; 9981 9982 QualType LHSElem = getAsArrayType(LHS)->getElementType(); 9983 QualType RHSElem = getAsArrayType(RHS)->getElementType(); 9984 if (Unqualified) { 9985 LHSElem = LHSElem.getUnqualifiedType(); 9986 RHSElem = RHSElem.getUnqualifiedType(); 9987 } 9988 9989 QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified); 9990 if (ResultType.isNull()) 9991 return {}; 9992 9993 const VariableArrayType* LVAT = getAsVariableArrayType(LHS); 9994 const VariableArrayType* RVAT = getAsVariableArrayType(RHS); 9995 9996 // If either side is a variable array, and both are complete, check whether 9997 // the current dimension is definite. 
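// For example, 'int[10]' and 'int[n]' can still merge when 'n' is not an
// integer constant expression, whereas 'int[10]' and 'int[20]' cannot.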
9998 if (LVAT || RVAT) { 9999 auto SizeFetch = [this](const VariableArrayType* VAT, 10000 const ConstantArrayType* CAT) 10001 -> std::pair<bool,llvm::APInt> { 10002 if (VAT) { 10003 Optional<llvm::APSInt> TheInt; 10004 Expr *E = VAT->getSizeExpr(); 10005 if (E && (TheInt = E->getIntegerConstantExpr(*this))) 10006 return std::make_pair(true, *TheInt); 10007 return std::make_pair(false, llvm::APSInt()); 10008 } 10009 if (CAT) 10010 return std::make_pair(true, CAT->getSize()); 10011 return std::make_pair(false, llvm::APInt()); 10012 }; 10013 10014 bool HaveLSize, HaveRSize; 10015 llvm::APInt LSize, RSize; 10016 std::tie(HaveLSize, LSize) = SizeFetch(LVAT, LCAT); 10017 std::tie(HaveRSize, RSize) = SizeFetch(RVAT, RCAT); 10018 if (HaveLSize && HaveRSize && !llvm::APInt::isSameValue(LSize, RSize)) 10019 return {}; // Definite, but unequal, array dimension 10020 } 10021 10022 if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10023 return LHS; 10024 if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10025 return RHS; 10026 if (LCAT) 10027 return getConstantArrayType(ResultType, LCAT->getSize(), 10028 LCAT->getSizeExpr(), 10029 ArrayType::ArraySizeModifier(), 0); 10030 if (RCAT) 10031 return getConstantArrayType(ResultType, RCAT->getSize(), 10032 RCAT->getSizeExpr(), 10033 ArrayType::ArraySizeModifier(), 0); 10034 if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType)) 10035 return LHS; 10036 if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType)) 10037 return RHS; 10038 if (LVAT) { 10039 // FIXME: This isn't correct! But tricky to implement because 10040 // the array's size has to be the size of LHS, but the type 10041 // has to be different. 10042 return LHS; 10043 } 10044 if (RVAT) { 10045 // FIXME: This isn't correct! But tricky to implement because 10046 // the array's size has to be the size of RHS, but the type 10047 // has to be different. 10048 return RHS; 10049 } 10050 if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS; 10051 if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS; 10052 return getIncompleteArrayType(ResultType, 10053 ArrayType::ArraySizeModifier(), 0); 10054 } 10055 case Type::FunctionNoProto: 10056 return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified); 10057 case Type::Record: 10058 case Type::Enum: 10059 return {}; 10060 case Type::Builtin: 10061 // Only exactly equal builtin types are compatible, which is tested above. 10062 return {}; 10063 case Type::Complex: 10064 // Distinct complex types are incompatible. 10065 return {}; 10066 case Type::Vector: 10067 // FIXME: The merged type should be an ExtVector! 10068 if (areCompatVectorTypes(LHSCan->castAs<VectorType>(), 10069 RHSCan->castAs<VectorType>())) 10070 return LHS; 10071 return {}; 10072 case Type::ConstantMatrix: 10073 if (areCompatMatrixTypes(LHSCan->castAs<ConstantMatrixType>(), 10074 RHSCan->castAs<ConstantMatrixType>())) 10075 return LHS; 10076 return {}; 10077 case Type::ObjCObject: { 10078 // Check if the types are assignment compatible. 10079 // FIXME: This should be type compatibility, e.g. whether 10080 // "LHS x; RHS x;" at global scope is legal. 
10081 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectType>(), 10082 RHS->castAs<ObjCObjectType>())) 10083 return LHS; 10084 return {}; 10085 } 10086 case Type::ObjCObjectPointer: 10087 if (OfBlockPointer) { 10088 if (canAssignObjCInterfacesInBlockPointer( 10089 LHS->castAs<ObjCObjectPointerType>(), 10090 RHS->castAs<ObjCObjectPointerType>(), BlockReturnType)) 10091 return LHS; 10092 return {}; 10093 } 10094 if (canAssignObjCInterfaces(LHS->castAs<ObjCObjectPointerType>(), 10095 RHS->castAs<ObjCObjectPointerType>())) 10096 return LHS; 10097 return {}; 10098 case Type::Pipe: 10099 assert(LHS != RHS && 10100 "Equivalent pipe types should have already been handled!"); 10101 return {}; 10102 case Type::ExtInt: { 10103 // Merge two ext-int types, while trying to preserve typedef info. 10104 bool LHSUnsigned = LHS->castAs<ExtIntType>()->isUnsigned(); 10105 bool RHSUnsigned = RHS->castAs<ExtIntType>()->isUnsigned(); 10106 unsigned LHSBits = LHS->castAs<ExtIntType>()->getNumBits(); 10107 unsigned RHSBits = RHS->castAs<ExtIntType>()->getNumBits(); 10108 10109 // Like unsigned/int, shouldn't have a type if they don't match. 10110 if (LHSUnsigned != RHSUnsigned) 10111 return {}; 10112 10113 if (LHSBits != RHSBits) 10114 return {}; 10115 return LHS; 10116 } 10117 } 10118 10119 llvm_unreachable("Invalid Type::Class!"); 10120 } 10121 10122 bool ASTContext::mergeExtParameterInfo( 10123 const FunctionProtoType *FirstFnType, const FunctionProtoType *SecondFnType, 10124 bool &CanUseFirst, bool &CanUseSecond, 10125 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &NewParamInfos) { 10126 assert(NewParamInfos.empty() && "param info list not empty"); 10127 CanUseFirst = CanUseSecond = true; 10128 bool FirstHasInfo = FirstFnType->hasExtParameterInfos(); 10129 bool SecondHasInfo = SecondFnType->hasExtParameterInfos(); 10130 10131 // Fast path: if the first type doesn't have ext parameter infos, 10132 // we match if and only if the second type also doesn't have them. 10133 if (!FirstHasInfo && !SecondHasInfo) 10134 return true; 10135 10136 bool NeedParamInfo = false; 10137 size_t E = FirstHasInfo ? FirstFnType->getExtParameterInfos().size() 10138 : SecondFnType->getExtParameterInfos().size(); 10139 10140 for (size_t I = 0; I < E; ++I) { 10141 FunctionProtoType::ExtParameterInfo FirstParam, SecondParam; 10142 if (FirstHasInfo) 10143 FirstParam = FirstFnType->getExtParameterInfo(I); 10144 if (SecondHasInfo) 10145 SecondParam = SecondFnType->getExtParameterInfo(I); 10146 10147 // Cannot merge unless everything except the noescape flag matches. 10148 if (FirstParam.withIsNoEscape(false) != SecondParam.withIsNoEscape(false)) 10149 return false; 10150 10151 bool FirstNoEscape = FirstParam.isNoEscape(); 10152 bool SecondNoEscape = SecondParam.isNoEscape(); 10153 bool IsNoEscape = FirstNoEscape && SecondNoEscape; 10154 NewParamInfos.push_back(FirstParam.withIsNoEscape(IsNoEscape)); 10155 if (NewParamInfos.back().getOpaqueValue()) 10156 NeedParamInfo = true; 10157 if (FirstNoEscape != IsNoEscape) 10158 CanUseFirst = false; 10159 if (SecondNoEscape != IsNoEscape) 10160 CanUseSecond = false; 10161 } 10162 10163 if (!NeedParamInfo) 10164 NewParamInfos.clear(); 10165 10166 return true; 10167 } 10168 10169 void ASTContext::ResetObjCLayout(const ObjCContainerDecl *CD) { 10170 ObjCLayouts[CD] = nullptr; 10171 } 10172 10173 /// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and 10174 /// 'RHS' attributes and returns the merged version; including for function 10175 /// return types. 
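/// For example, under Objective-C GC the redeclaration pair 'id foo();' and
/// '__strong id foo();' merges, because an unqualified Objective-C object
/// pointer is implicitly __strong.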
10176 QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) { 10177 QualType LHSCan = getCanonicalType(LHS), 10178 RHSCan = getCanonicalType(RHS); 10179 // If two types are identical, they are compatible. 10180 if (LHSCan == RHSCan) 10181 return LHS; 10182 if (RHSCan->isFunctionType()) { 10183 if (!LHSCan->isFunctionType()) 10184 return {}; 10185 QualType OldReturnType = 10186 cast<FunctionType>(RHSCan.getTypePtr())->getReturnType(); 10187 QualType NewReturnType = 10188 cast<FunctionType>(LHSCan.getTypePtr())->getReturnType(); 10189 QualType ResReturnType = 10190 mergeObjCGCQualifiers(NewReturnType, OldReturnType); 10191 if (ResReturnType.isNull()) 10192 return {}; 10193 if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) { 10194 // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo(); 10195 // In either case, use OldReturnType to build the new function type. 10196 const auto *F = LHS->castAs<FunctionType>(); 10197 if (const auto *FPT = cast<FunctionProtoType>(F)) { 10198 FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); 10199 EPI.ExtInfo = getFunctionExtInfo(LHS); 10200 QualType ResultType = 10201 getFunctionType(OldReturnType, FPT->getParamTypes(), EPI); 10202 return ResultType; 10203 } 10204 } 10205 return {}; 10206 } 10207 10208 // If the qualifiers are different, the types can still be merged. 10209 Qualifiers LQuals = LHSCan.getLocalQualifiers(); 10210 Qualifiers RQuals = RHSCan.getLocalQualifiers(); 10211 if (LQuals != RQuals) { 10212 // If any of these qualifiers are different, we have a type mismatch. 10213 if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() || 10214 LQuals.getAddressSpace() != RQuals.getAddressSpace()) 10215 return {}; 10216 10217 // Exactly one GC qualifier difference is allowed: __strong is 10218 // okay if the other type has no GC qualifier but is an Objective 10219 // C object pointer (i.e. implicitly strong by default). We fix 10220 // this by pretending that the unqualified type was actually 10221 // qualified __strong. 
10222 Qualifiers::GC GC_L = LQuals.getObjCGCAttr(); 10223 Qualifiers::GC GC_R = RQuals.getObjCGCAttr(); 10224 assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements"); 10225 10226 if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak) 10227 return {}; 10228 10229 if (GC_L == Qualifiers::Strong) 10230 return LHS; 10231 if (GC_R == Qualifiers::Strong) 10232 return RHS; 10233 return {}; 10234 } 10235 10236 if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) { 10237 QualType LHSBaseQT = LHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10238 QualType RHSBaseQT = RHS->castAs<ObjCObjectPointerType>()->getPointeeType(); 10239 QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT); 10240 if (ResQT == LHSBaseQT) 10241 return LHS; 10242 if (ResQT == RHSBaseQT) 10243 return RHS; 10244 } 10245 return {}; 10246 } 10247 10248 //===----------------------------------------------------------------------===// 10249 // Integer Predicates 10250 //===----------------------------------------------------------------------===// 10251 10252 unsigned ASTContext::getIntWidth(QualType T) const { 10253 if (const auto *ET = T->getAs<EnumType>()) 10254 T = ET->getDecl()->getIntegerType(); 10255 if (T->isBooleanType()) 10256 return 1; 10257 if (const auto *EIT = T->getAs<ExtIntType>()) 10258 return EIT->getNumBits(); 10259 // For builtin types, just use the standard type sizing method 10260 return (unsigned)getTypeSize(T); 10261 } 10262 10263 QualType ASTContext::getCorrespondingUnsignedType(QualType T) const { 10264 assert((T->hasSignedIntegerRepresentation() || T->isSignedFixedPointType()) && 10265 "Unexpected type"); 10266 10267 // Turn <4 x signed int> -> <4 x unsigned int> 10268 if (const auto *VTy = T->getAs<VectorType>()) 10269 return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()), 10270 VTy->getNumElements(), VTy->getVectorKind()); 10271 10272 // For _ExtInt, return an unsigned _ExtInt with same width. 10273 if (const auto *EITy = T->getAs<ExtIntType>()) 10274 return getExtIntType(/*IsUnsigned=*/true, EITy->getNumBits()); 10275 10276 // For enums, get the underlying integer type of the enum, and let the general 10277 // integer type sign-changing code handle it. 10278 if (const auto *ETy = T->getAs<EnumType>()) 10279 T = ETy->getDecl()->getIntegerType(); 10280 10281 switch (T->castAs<BuiltinType>()->getKind()) { 10282 case BuiltinType::Char_S: 10283 case BuiltinType::SChar: 10284 return UnsignedCharTy; 10285 case BuiltinType::Short: 10286 return UnsignedShortTy; 10287 case BuiltinType::Int: 10288 return UnsignedIntTy; 10289 case BuiltinType::Long: 10290 return UnsignedLongTy; 10291 case BuiltinType::LongLong: 10292 return UnsignedLongLongTy; 10293 case BuiltinType::Int128: 10294 return UnsignedInt128Ty; 10295 // wchar_t is special. It is either signed or not, but when it's signed, 10296 // there's no matching "unsigned wchar_t". Therefore we return the unsigned 10297 // version of its underlying type instead.
10298 case BuiltinType::WChar_S: 10299 return getUnsignedWCharType(); 10300 10301 case BuiltinType::ShortAccum: 10302 return UnsignedShortAccumTy; 10303 case BuiltinType::Accum: 10304 return UnsignedAccumTy; 10305 case BuiltinType::LongAccum: 10306 return UnsignedLongAccumTy; 10307 case BuiltinType::SatShortAccum: 10308 return SatUnsignedShortAccumTy; 10309 case BuiltinType::SatAccum: 10310 return SatUnsignedAccumTy; 10311 case BuiltinType::SatLongAccum: 10312 return SatUnsignedLongAccumTy; 10313 case BuiltinType::ShortFract: 10314 return UnsignedShortFractTy; 10315 case BuiltinType::Fract: 10316 return UnsignedFractTy; 10317 case BuiltinType::LongFract: 10318 return UnsignedLongFractTy; 10319 case BuiltinType::SatShortFract: 10320 return SatUnsignedShortFractTy; 10321 case BuiltinType::SatFract: 10322 return SatUnsignedFractTy; 10323 case BuiltinType::SatLongFract: 10324 return SatUnsignedLongFractTy; 10325 default: 10326 llvm_unreachable("Unexpected signed integer or fixed point type"); 10327 } 10328 } 10329 10330 QualType ASTContext::getCorrespondingSignedType(QualType T) const { 10331 assert((T->hasUnsignedIntegerRepresentation() || 10332 T->isUnsignedFixedPointType()) && 10333 "Unexpected type"); 10334 10335 // Turn <4 x unsigned int> -> <4 x signed int> 10336 if (const auto *VTy = T->getAs<VectorType>()) 10337 return getVectorType(getCorrespondingSignedType(VTy->getElementType()), 10338 VTy->getNumElements(), VTy->getVectorKind()); 10339 10340 // For _ExtInt, return a signed _ExtInt with same width. 10341 if (const auto *EITy = T->getAs<ExtIntType>()) 10342 return getExtIntType(/*IsUnsigned=*/false, EITy->getNumBits()); 10343 10344 // For enums, get the underlying integer type of the enum, and let the general 10345 // integer type sign-changing code handle it. 10346 if (const auto *ETy = T->getAs<EnumType>()) 10347 T = ETy->getDecl()->getIntegerType(); 10348 10349 switch (T->castAs<BuiltinType>()->getKind()) { 10350 case BuiltinType::Char_U: 10351 case BuiltinType::UChar: 10352 return SignedCharTy; 10353 case BuiltinType::UShort: 10354 return ShortTy; 10355 case BuiltinType::UInt: 10356 return IntTy; 10357 case BuiltinType::ULong: 10358 return LongTy; 10359 case BuiltinType::ULongLong: 10360 return LongLongTy; 10361 case BuiltinType::UInt128: 10362 return Int128Ty; 10363 // wchar_t is special. It is either unsigned or not, but when it's unsigned, 10364 // there's no matching "signed wchar_t". Therefore we return the signed 10365 // version of its underlying type instead.
10366 case BuiltinType::WChar_U: 10367 return getSignedWCharType(); 10368 10369 case BuiltinType::UShortAccum: 10370 return ShortAccumTy; 10371 case BuiltinType::UAccum: 10372 return AccumTy; 10373 case BuiltinType::ULongAccum: 10374 return LongAccumTy; 10375 case BuiltinType::SatUShortAccum: 10376 return SatShortAccumTy; 10377 case BuiltinType::SatUAccum: 10378 return SatAccumTy; 10379 case BuiltinType::SatULongAccum: 10380 return SatLongAccumTy; 10381 case BuiltinType::UShortFract: 10382 return ShortFractTy; 10383 case BuiltinType::UFract: 10384 return FractTy; 10385 case BuiltinType::ULongFract: 10386 return LongFractTy; 10387 case BuiltinType::SatUShortFract: 10388 return SatShortFractTy; 10389 case BuiltinType::SatUFract: 10390 return SatFractTy; 10391 case BuiltinType::SatULongFract: 10392 return SatLongFractTy; 10393 default: 10394 llvm_unreachable("Unexpected unsigned integer or fixed point type"); 10395 } 10396 } 10397 10398 ASTMutationListener::~ASTMutationListener() = default; 10399 10400 void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD, 10401 QualType ReturnType) {} 10402 10403 //===----------------------------------------------------------------------===// 10404 // Builtin Type Computation 10405 //===----------------------------------------------------------------------===// 10406 10407 /// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the 10408 /// pointer over the consumed characters. This returns the resultant type. If 10409 /// AllowTypeModifiers is false then modifier like * are not parsed, just basic 10410 /// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of 10411 /// a vector of "i*". 10412 /// 10413 /// RequiresICE is filled in on return to indicate whether the value is required 10414 /// to be an Integer Constant Expression. 10415 static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context, 10416 ASTContext::GetBuiltinTypeError &Error, 10417 bool &RequiresICE, 10418 bool AllowTypeModifiers) { 10419 // Modifiers. 10420 int HowLong = 0; 10421 bool Signed = false, Unsigned = false; 10422 RequiresICE = false; 10423 10424 // Read the prefixed modifiers first. 10425 bool Done = false; 10426 #ifndef NDEBUG 10427 bool IsSpecial = false; 10428 #endif 10429 while (!Done) { 10430 switch (*Str++) { 10431 default: Done = true; --Str; break; 10432 case 'I': 10433 RequiresICE = true; 10434 break; 10435 case 'S': 10436 assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!"); 10437 assert(!Signed && "Can't use 'S' modifier multiple times!"); 10438 Signed = true; 10439 break; 10440 case 'U': 10441 assert(!Signed && "Can't use both 'S' and 'U' modifiers!"); 10442 assert(!Unsigned && "Can't use 'U' modifier multiple times!"); 10443 Unsigned = true; 10444 break; 10445 case 'L': 10446 assert(!IsSpecial && "Can't use 'L' with 'W', 'N', 'Z' or 'O' modifiers"); 10447 assert(HowLong <= 2 && "Can't have LLLL modifier"); 10448 ++HowLong; 10449 break; 10450 case 'N': 10451 // 'N' behaves like 'L' for all non LP64 targets and 'int' otherwise. 10452 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10453 assert(HowLong == 0 && "Can't use both 'L' and 'N' modifiers!"); 10454 #ifndef NDEBUG 10455 IsSpecial = true; 10456 #endif 10457 if (Context.getTargetInfo().getLongWidth() == 32) 10458 ++HowLong; 10459 break; 10460 case 'W': 10461 // This modifier represents int64 type. 
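// For example, 'Wi' decodes to 'long' on typical LP64 targets and to
// 'long long' on LLP64 targets such as 64-bit Windows.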
10462 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10463 assert(HowLong == 0 && "Can't use both 'L' and 'W' modifiers!"); 10464 #ifndef NDEBUG 10465 IsSpecial = true; 10466 #endif 10467 switch (Context.getTargetInfo().getInt64Type()) { 10468 default: 10469 llvm_unreachable("Unexpected integer type"); 10470 case TargetInfo::SignedLong: 10471 HowLong = 1; 10472 break; 10473 case TargetInfo::SignedLongLong: 10474 HowLong = 2; 10475 break; 10476 } 10477 break; 10478 case 'Z': 10479 // This modifier represents int32 type. 10480 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10481 assert(HowLong == 0 && "Can't use both 'L' and 'Z' modifiers!"); 10482 #ifndef NDEBUG 10483 IsSpecial = true; 10484 #endif 10485 switch (Context.getTargetInfo().getIntTypeByWidth(32, true)) { 10486 default: 10487 llvm_unreachable("Unexpected integer type"); 10488 case TargetInfo::SignedInt: 10489 HowLong = 0; 10490 break; 10491 case TargetInfo::SignedLong: 10492 HowLong = 1; 10493 break; 10494 case TargetInfo::SignedLongLong: 10495 HowLong = 2; 10496 break; 10497 } 10498 break; 10499 case 'O': 10500 assert(!IsSpecial && "Can't use two 'N', 'W', 'Z' or 'O' modifiers!"); 10501 assert(HowLong == 0 && "Can't use both 'L' and 'O' modifiers!"); 10502 #ifndef NDEBUG 10503 IsSpecial = true; 10504 #endif 10505 if (Context.getLangOpts().OpenCL) 10506 HowLong = 1; 10507 else 10508 HowLong = 2; 10509 break; 10510 } 10511 } 10512 10513 QualType Type; 10514 10515 // Read the base type. 10516 switch (*Str++) { 10517 default: llvm_unreachable("Unknown builtin type letter!"); 10518 case 'x': 10519 assert(HowLong == 0 && !Signed && !Unsigned && 10520 "Bad modifiers used with 'x'!"); 10521 Type = Context.Float16Ty; 10522 break; 10523 case 'y': 10524 assert(HowLong == 0 && !Signed && !Unsigned && 10525 "Bad modifiers used with 'y'!"); 10526 Type = Context.BFloat16Ty; 10527 break; 10528 case 'v': 10529 assert(HowLong == 0 && !Signed && !Unsigned && 10530 "Bad modifiers used with 'v'!"); 10531 Type = Context.VoidTy; 10532 break; 10533 case 'h': 10534 assert(HowLong == 0 && !Signed && !Unsigned && 10535 "Bad modifiers used with 'h'!"); 10536 Type = Context.HalfTy; 10537 break; 10538 case 'f': 10539 assert(HowLong == 0 && !Signed && !Unsigned && 10540 "Bad modifiers used with 'f'!"); 10541 Type = Context.FloatTy; 10542 break; 10543 case 'd': 10544 assert(HowLong < 3 && !Signed && !Unsigned && 10545 "Bad modifiers used with 'd'!"); 10546 if (HowLong == 1) 10547 Type = Context.LongDoubleTy; 10548 else if (HowLong == 2) 10549 Type = Context.Float128Ty; 10550 else 10551 Type = Context.DoubleTy; 10552 break; 10553 case 's': 10554 assert(HowLong == 0 && "Bad modifiers used with 's'!"); 10555 if (Unsigned) 10556 Type = Context.UnsignedShortTy; 10557 else 10558 Type = Context.ShortTy; 10559 break; 10560 case 'i': 10561 if (HowLong == 3) 10562 Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty; 10563 else if (HowLong == 2) 10564 Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy; 10565 else if (HowLong == 1) 10566 Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy; 10567 else 10568 Type = Unsigned ? 
Context.UnsignedIntTy : Context.IntTy; 10569 break; 10570 case 'c': 10571 assert(HowLong == 0 && "Bad modifiers used with 'c'!"); 10572 if (Signed) 10573 Type = Context.SignedCharTy; 10574 else if (Unsigned) 10575 Type = Context.UnsignedCharTy; 10576 else 10577 Type = Context.CharTy; 10578 break; 10579 case 'b': // boolean 10580 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!"); 10581 Type = Context.BoolTy; 10582 break; 10583 case 'z': // size_t. 10584 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!"); 10585 Type = Context.getSizeType(); 10586 break; 10587 case 'w': // wchar_t. 10588 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!"); 10589 Type = Context.getWideCharType(); 10590 break; 10591 case 'F': 10592 Type = Context.getCFConstantStringType(); 10593 break; 10594 case 'G': 10595 Type = Context.getObjCIdType(); 10596 break; 10597 case 'H': 10598 Type = Context.getObjCSelType(); 10599 break; 10600 case 'M': 10601 Type = Context.getObjCSuperType(); 10602 break; 10603 case 'a': 10604 Type = Context.getBuiltinVaListType(); 10605 assert(!Type.isNull() && "builtin va list type not initialized!"); 10606 break; 10607 case 'A': 10608 // This is a "reference" to a va_list; however, what exactly 10609 // this means depends on how va_list is defined. There are two 10610 // different kinds of va_list: ones passed by value, and ones 10611 // passed by reference. An example of a by-value va_list is 10612 // x86, where va_list is a char*. An example of by-ref va_list 10613 // is x86-64, where va_list is a __va_list_tag[1]. For x86, 10614 // we want this argument to be a char*&; for x86-64, we want 10615 // it to be a __va_list_tag*. 10616 Type = Context.getBuiltinVaListType(); 10617 assert(!Type.isNull() && "builtin va list type not initialized!"); 10618 if (Type->isArrayType()) 10619 Type = Context.getArrayDecayedType(Type); 10620 else 10621 Type = Context.getLValueReferenceType(Type); 10622 break; 10623 case 'q': { 10624 char *End; 10625 unsigned NumElements = strtoul(Str, &End, 10); 10626 assert(End != Str && "Missing vector size"); 10627 Str = End; 10628 10629 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 10630 RequiresICE, false); 10631 assert(!RequiresICE && "Can't require vector ICE"); 10632 10633 Type = Context.getScalableVectorType(ElementType, NumElements); 10634 break; 10635 } 10636 case 'V': { 10637 char *End; 10638 unsigned NumElements = strtoul(Str, &End, 10); 10639 assert(End != Str && "Missing vector size"); 10640 Str = End; 10641 10642 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, 10643 RequiresICE, false); 10644 assert(!RequiresICE && "Can't require vector ICE"); 10645 10646 // TODO: No way to make AltiVec vectors in builtins yet. 
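// For example, the descriptor "V2d" yields a GCC-style generic vector of two
// doubles.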
10647 Type = Context.getVectorType(ElementType, NumElements, 10648 VectorType::GenericVector); 10649 break; 10650 } 10651 case 'E': { 10652 char *End; 10653 10654 unsigned NumElements = strtoul(Str, &End, 10); 10655 assert(End != Str && "Missing vector size"); 10656 10657 Str = End; 10658 10659 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 10660 false); 10661 Type = Context.getExtVectorType(ElementType, NumElements); 10662 break; 10663 } 10664 case 'X': { 10665 QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE, 10666 false); 10667 assert(!RequiresICE && "Can't require complex ICE"); 10668 Type = Context.getComplexType(ElementType); 10669 break; 10670 } 10671 case 'Y': 10672 Type = Context.getPointerDiffType(); 10673 break; 10674 case 'P': 10675 Type = Context.getFILEType(); 10676 if (Type.isNull()) { 10677 Error = ASTContext::GE_Missing_stdio; 10678 return {}; 10679 } 10680 break; 10681 case 'J': 10682 if (Signed) 10683 Type = Context.getsigjmp_bufType(); 10684 else 10685 Type = Context.getjmp_bufType(); 10686 10687 if (Type.isNull()) { 10688 Error = ASTContext::GE_Missing_setjmp; 10689 return {}; 10690 } 10691 break; 10692 case 'K': 10693 assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!"); 10694 Type = Context.getucontext_tType(); 10695 10696 if (Type.isNull()) { 10697 Error = ASTContext::GE_Missing_ucontext; 10698 return {}; 10699 } 10700 break; 10701 case 'p': 10702 Type = Context.getProcessIDType(); 10703 break; 10704 } 10705 10706 // If there are modifiers and if we're allowed to parse them, go for it. 10707 Done = !AllowTypeModifiers; 10708 while (!Done) { 10709 switch (char c = *Str++) { 10710 default: Done = true; --Str; break; 10711 case '*': 10712 case '&': { 10713 // Both pointers and references can have their pointee types 10714 // qualified with an address space. 10715 char *End; 10716 unsigned AddrSpace = strtoul(Str, &End, 10); 10717 if (End != Str) { 10718 // Note AddrSpace == 0 is not the same as an unspecified address space. 10719 Type = Context.getAddrSpaceQualType( 10720 Type, 10721 Context.getLangASForBuiltinAddressSpace(AddrSpace)); 10722 Str = End; 10723 } 10724 if (c == '*') 10725 Type = Context.getPointerType(Type); 10726 else 10727 Type = Context.getLValueReferenceType(Type); 10728 break; 10729 } 10730 // FIXME: There's no way to have a built-in with an rvalue ref arg. 10731 case 'C': 10732 Type = Type.withConst(); 10733 break; 10734 case 'D': 10735 Type = Context.getVolatileType(Type); 10736 break; 10737 case 'R': 10738 Type = Type.withRestrict(); 10739 break; 10740 } 10741 } 10742 10743 assert((!RequiresICE || Type->isIntegralOrEnumerationType()) && 10744 "Integer constant 'I' type must be an integer"); 10745 10746 return Type; 10747 } 10748 10749 // On some targets such as PowerPC, some of the builtins are defined with custom 10750 // type descriptors for target-dependent types. These descriptors are decoded in 10751 // other functions, but it may be useful to be able to fall back to default 10752 // descriptor decoding to define builtins mixing target-dependent and target- 10753 // independent types. This function allows decoding one type descriptor with 10754 // default decoding. 
10755 QualType ASTContext::DecodeTypeStr(const char *&Str, const ASTContext &Context, 10756 GetBuiltinTypeError &Error, bool &RequireICE, 10757 bool AllowTypeModifiers) const { 10758 return DecodeTypeFromStr(Str, Context, Error, RequireICE, AllowTypeModifiers); 10759 } 10760 10761 /// GetBuiltinType - Return the type for the specified builtin. 10762 QualType ASTContext::GetBuiltinType(unsigned Id, 10763 GetBuiltinTypeError &Error, 10764 unsigned *IntegerConstantArgs) const { 10765 const char *TypeStr = BuiltinInfo.getTypeString(Id); 10766 if (TypeStr[0] == '\0') { 10767 Error = GE_Missing_type; 10768 return {}; 10769 } 10770 10771 SmallVector<QualType, 8> ArgTypes; 10772 10773 bool RequiresICE = false; 10774 Error = GE_None; 10775 QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error, 10776 RequiresICE, true); 10777 if (Error != GE_None) 10778 return {}; 10779 10780 assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE"); 10781 10782 while (TypeStr[0] && TypeStr[0] != '.') { 10783 QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true); 10784 if (Error != GE_None) 10785 return {}; 10786 10787 // If this argument is required to be an IntegerConstantExpression and the 10788 // caller cares, fill in the bitmask we return. 10789 if (RequiresICE && IntegerConstantArgs) 10790 *IntegerConstantArgs |= 1 << ArgTypes.size(); 10791 10792 // Do array -> pointer decay. The builtin should use the decayed type. 10793 if (Ty->isArrayType()) 10794 Ty = getArrayDecayedType(Ty); 10795 10796 ArgTypes.push_back(Ty); 10797 } 10798 10799 if (Id == Builtin::BI__GetExceptionInfo) 10800 return {}; 10801 10802 assert((TypeStr[0] != '.' || TypeStr[1] == 0) && 10803 "'.' should only occur at end of builtin type list!"); 10804 10805 bool Variadic = (TypeStr[0] == '.'); 10806 10807 FunctionType::ExtInfo EI(getDefaultCallingConvention( 10808 Variadic, /*IsCXXMethod=*/false, /*IsBuiltin=*/true)); 10809 if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true); 10810 10811 10812 // We really shouldn't be making a no-proto type here. 10813 if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus) 10814 return getFunctionNoProtoType(ResType, EI); 10815 10816 FunctionProtoType::ExtProtoInfo EPI; 10817 EPI.ExtInfo = EI; 10818 EPI.Variadic = Variadic; 10819 if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id)) 10820 EPI.ExceptionSpec.Type = 10821 getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone; 10822 10823 return getFunctionType(ResType, ArgTypes, EPI); 10824 } 10825 10826 static GVALinkage basicGVALinkageForFunction(const ASTContext &Context, 10827 const FunctionDecl *FD) { 10828 if (!FD->isExternallyVisible()) 10829 return GVA_Internal; 10830 10831 // Non-user-provided functions get emitted as weak definitions with every 10832 // use, no matter whether they've been explicitly instantiated etc. 
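// For example, an implicitly-defined default constructor or copy-assignment
// operator is always treated as a discardable ODR definition here.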
10833 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) 10834 if (!MD->isUserProvided()) 10835 return GVA_DiscardableODR; 10836 10837 GVALinkage External; 10838 switch (FD->getTemplateSpecializationKind()) { 10839 case TSK_Undeclared: 10840 case TSK_ExplicitSpecialization: 10841 External = GVA_StrongExternal; 10842 break; 10843 10844 case TSK_ExplicitInstantiationDefinition: 10845 return GVA_StrongODR; 10846 10847 // C++11 [temp.explicit]p10: 10848 // [ Note: The intent is that an inline function that is the subject of 10849 // an explicit instantiation declaration will still be implicitly 10850 // instantiated when used so that the body can be considered for 10851 // inlining, but that no out-of-line copy of the inline function would be 10852 // generated in the translation unit. -- end note ] 10853 case TSK_ExplicitInstantiationDeclaration: 10854 return GVA_AvailableExternally; 10855 10856 case TSK_ImplicitInstantiation: 10857 External = GVA_DiscardableODR; 10858 break; 10859 } 10860 10861 if (!FD->isInlined()) 10862 return External; 10863 10864 if ((!Context.getLangOpts().CPlusPlus && 10865 !Context.getTargetInfo().getCXXABI().isMicrosoft() && 10866 !FD->hasAttr<DLLExportAttr>()) || 10867 FD->hasAttr<GNUInlineAttr>()) { 10868 // FIXME: This doesn't match gcc's behavior for dllexport inline functions. 10869 10870 // GNU or C99 inline semantics. Determine whether this symbol should be 10871 // externally visible. 10872 if (FD->isInlineDefinitionExternallyVisible()) 10873 return External; 10874 10875 // C99 inline semantics, where the symbol is not externally visible. 10876 return GVA_AvailableExternally; 10877 } 10878 10879 // Functions specified with extern and inline in -fms-compatibility mode 10880 // forcibly get emitted. While the body of the function cannot be later 10881 // replaced, the function definition cannot be discarded. 10882 if (FD->isMSExternInline()) 10883 return GVA_StrongODR; 10884 10885 return GVA_DiscardableODR; 10886 } 10887 10888 static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context, 10889 const Decl *D, GVALinkage L) { 10890 // See http://msdn.microsoft.com/en-us/library/xa0d9ste.aspx 10891 // dllexport/dllimport on inline functions. 10892 if (D->hasAttr<DLLImportAttr>()) { 10893 if (L == GVA_DiscardableODR || L == GVA_StrongODR) 10894 return GVA_AvailableExternally; 10895 } else if (D->hasAttr<DLLExportAttr>()) { 10896 if (L == GVA_DiscardableODR) 10897 return GVA_StrongODR; 10898 } else if (Context.getLangOpts().CUDA && Context.getLangOpts().CUDAIsDevice) { 10899 // Device-side functions with __global__ attribute must always be 10900 // visible externally so they can be launched from host. 10901 if (D->hasAttr<CUDAGlobalAttr>() && 10902 (L == GVA_DiscardableODR || L == GVA_Internal)) 10903 return GVA_StrongODR; 10904 // Single source offloading languages like CUDA/HIP need to be able to 10905 // access static device variables from host code of the same compilation 10906 // unit. This is done by externalizing the static variable with a shared 10907 // name between the host and device compilation which is the same for the 10908 // same compilation unit whereas different among different compilation 10909 // units. 10910 if (Context.shouldExternalizeStaticVar(D)) 10911 return GVA_StrongExternal; 10912 } 10913 return L; 10914 } 10915 10916 /// Adjust the GVALinkage for a declaration based on what an external AST source 10917 /// knows about whether there can be other definitions of this declaration. 
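/// For example, if the external source guarantees that it never provides
/// another definition (EK_Never), a discardable definition can be promoted to
/// a strong ODR one; if a definition is always available externally
/// (EK_Always), we can rely on that copy instead.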

/// Adjust the GVALinkage for a declaration based on what an external AST source
/// knows about whether there can be other definitions of this declaration.
static GVALinkage
adjustGVALinkageForExternalDefinitionKind(const ASTContext &Ctx, const Decl *D,
                                          GVALinkage L) {
  ExternalASTSource *Source = Ctx.getExternalSource();
  if (!Source)
    return L;

  switch (Source->hasExternalDefinitions(D)) {
  case ExternalASTSource::EK_Never:
    // Other translation units rely on us to provide the definition.
    if (L == GVA_DiscardableODR)
      return GVA_StrongODR;
    break;

  case ExternalASTSource::EK_Always:
    return GVA_AvailableExternally;

  case ExternalASTSource::EK_ReplyHazy:
    break;
  }
  return L;
}

GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
  return adjustGVALinkageForExternalDefinitionKind(*this, FD,
             adjustGVALinkageForAttributes(*this, FD,
                 basicGVALinkageForFunction(*this, FD)));
}

static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
                                             const VarDecl *VD) {
  if (!VD->isExternallyVisible())
    return GVA_Internal;

  if (VD->isStaticLocal()) {
    const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
    while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
      LexicalContext = LexicalContext->getLexicalParent();

    // ObjC Blocks can create local variables that don't have a FunctionDecl
    // LexicalContext.
    if (!LexicalContext)
      return GVA_DiscardableODR;

    // Otherwise, let the static local variable inherit its linkage from the
    // nearest enclosing function.
    auto StaticLocalLinkage =
        Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));

    // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
    // be emitted in any object with references to the symbol for the object it
    // contains, whether inline or out-of-line."
    // Similar behavior is observed with MSVC. An alternative ABI could use
    // StrongODR/AvailableExternally to match the function, but none are
    // known/supported currently.
    if (StaticLocalLinkage == GVA_StrongODR ||
        StaticLocalLinkage == GVA_AvailableExternally)
      return GVA_DiscardableODR;
    return StaticLocalLinkage;
  }

  // MSVC treats in-class initialized static data members as definitions.
  // By giving them non-strong linkage, out-of-line definitions won't
  // cause link errors.
  if (Context.isMSStaticDataMemberInlineDefinition(VD))
    return GVA_DiscardableODR;

  // Most non-template variables have strong linkage; inline variables are
  // linkonce_odr or (occasionally, for compatibility) weak_odr.
  GVALinkage StrongLinkage;
  switch (Context.getInlineVariableDefinitionKind(VD)) {
  case ASTContext::InlineVariableDefinitionKind::None:
    StrongLinkage = GVA_StrongExternal;
    break;
  case ASTContext::InlineVariableDefinitionKind::Weak:
  case ASTContext::InlineVariableDefinitionKind::WeakUnknown:
    StrongLinkage = GVA_DiscardableODR;
    break;
  case ASTContext::InlineVariableDefinitionKind::Strong:
    StrongLinkage = GVA_StrongODR;
    break;
  }

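  // Illustrative specialization kinds for a C++14 variable template
  // 'template <class T> T V = T();' (the name 'V' is hypothetical):
  //   use of V<int> only             -> TSK_ImplicitInstantiation
  //   template int V<int>;           -> TSK_ExplicitInstantiationDefinition
  //   extern template int V<int>;    -> TSK_ExplicitInstantiationDeclaration
  //   template <> int V<int> = 42;   -> TSK_ExplicitSpecialization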
  switch (VD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
    return StrongLinkage;

  case TSK_ExplicitSpecialization:
    return Context.getTargetInfo().getCXXABI().isMicrosoft() &&
                   VD->isStaticDataMember()
               ? GVA_StrongODR
               : StrongLinkage;

  case TSK_ExplicitInstantiationDefinition:
    return GVA_StrongODR;

  case TSK_ExplicitInstantiationDeclaration:
    return GVA_AvailableExternally;

  case TSK_ImplicitInstantiation:
    return GVA_DiscardableODR;
  }

  llvm_unreachable("Invalid Linkage!");
}

GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
  return adjustGVALinkageForExternalDefinitionKind(*this, VD,
             adjustGVALinkageForAttributes(*this, VD,
                 basicGVALinkageForVariable(*this, VD)));
}
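
// Rough examples of GetGVALinkageForVariable's classification at namespace
// scope (C++17 assumed, no dll/CUDA attributes, no external AST source):
//   static int S;       // not externally visible -> GVA_Internal
//   inline int I = 0;   // inline variable        -> GVA_DiscardableODR
//   int G = 0;          // ordinary definition    -> GVA_StrongExternal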

bool ASTContext::DeclMustBeEmitted(const Decl *D) {
  if (const auto *VD = dyn_cast<VarDecl>(D)) {
    if (!VD->isFileVarDecl())
      return false;
    // Global named register variables (GNU extension) are never emitted.
    if (VD->getStorageClass() == SC_Register)
      return false;
    if (VD->getDescribedVarTemplate() ||
        isa<VarTemplatePartialSpecializationDecl>(VD))
      return false;
  } else if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // We never need to emit an uninstantiated function template.
    if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
      return false;
  } else if (isa<PragmaCommentDecl>(D))
    return true;
  else if (isa<PragmaDetectMismatchDecl>(D))
    return true;
  else if (isa<OMPRequiresDecl>(D))
    return true;
  else if (isa<OMPThreadPrivateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPAllocateDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<OMPDeclareReductionDecl>(D) || isa<OMPDeclareMapperDecl>(D))
    return !D->getDeclContext()->isDependentContext();
  else if (isa<ImportDecl>(D))
    return true;
  else
    return false;

  // If this is a member of a class template, we do not need to emit it.
  if (D->getDeclContext()->isDependentContext())
    return false;

  // Weak references don't produce any output by themselves.
  if (D->hasAttr<WeakRefAttr>())
    return false;

  // Aliases and used decls are required.
  if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
    return true;

  if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
    // Forward declarations aren't required.
    if (!FD->doesThisDeclarationHaveABody())
      return FD->doesDeclarationForceExternallyVisibleDefinition();

    // Constructors and destructors are required.
    if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
      return true;

    // The key function for a class is required. This rule only comes
    // into play when inline functions can be key functions, though.
    if (getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
      if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
        const CXXRecordDecl *RD = MD->getParent();
        if (MD->isOutOfLine() && RD->isDynamicClass()) {
          const CXXMethodDecl *KeyFunc = getCurrentKeyFunction(RD);
          if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
            return true;
        }
      }
    }

    GVALinkage Linkage = GetGVALinkageForFunction(FD);

    // static, static inline, always_inline, and extern inline functions can
    // always be deferred. Normal inline functions can be deferred in C99/C++.
    // Implicit template instantiations can also be deferred in C++.
    return !isDiscardableGVALinkage(Linkage);
  }

  const auto *VD = cast<VarDecl>(D);
  assert(VD->isFileVarDecl() && "Expected file scoped var");

  // If the decl is marked as `declare target to`, it should be emitted for the
  // host and for the device.
  if (LangOpts.OpenMP &&
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
    return true;

  if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly &&
      !isMSStaticDataMemberInlineDefinition(VD))
    return false;

  // Variables that can be needed in other TUs are required.
  auto Linkage = GetGVALinkageForVariable(VD);
  if (!isDiscardableGVALinkage(Linkage))
    return true;

  // We never need to emit a variable that is available in another TU.
  if (Linkage == GVA_AvailableExternally)
    return false;

  // Variables that have destruction with side-effects are required.
  if (VD->needsDestruction(*this))
    return true;

  // Variables that have initialization with side-effects are required.
  if (VD->getInit() && VD->getInit()->HasSideEffects(*this) &&
      // We can get a value-dependent initializer during error recovery.
      (VD->getInit()->isValueDependent() || !VD->evaluateValue()))
    return true;

  // Likewise, variables with tuple-like bindings are required if their
  // bindings have side-effects.
  if (const auto *DD = dyn_cast<DecompositionDecl>(VD))
    for (const auto *BD : DD->bindings())
      if (const auto *BindingVD = BD->getHoldingVar())
        if (DeclMustBeEmitted(BindingVD))
          return true;

  return false;
}

void ASTContext::forEachMultiversionedFunctionVersion(
    const FunctionDecl *FD,
    llvm::function_ref<void(FunctionDecl *)> Pred) const {
  assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
  llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
  FD = FD->getMostRecentDecl();
  // FIXME: The order of traversal here matters and depends on the order of
  // lookup results, which happens to be (mostly) oldest-to-newest, but we
  // shouldn't rely on that.
  for (auto *CurDecl :
       FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
    FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
    if (CurFD && hasSameType(CurFD->getType(), FD->getType()) &&
        std::end(SeenDecls) == llvm::find(SeenDecls, CurFD)) {
      SeenDecls.insert(CurFD);
      Pred(CurFD);
    }
  }
}

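// Usage sketch for forEachMultiversionedFunctionVersion (hypothetical caller):
// enumerate the declared versions of a function multiversioned via the target
// attribute, e.g.
//   __attribute__((target("default"))) int foo();
//   __attribute__((target("avx2")))    int foo();
// with:
//   Ctx.forEachMultiversionedFunctionVersion(
//       FD, [&](FunctionDecl *Version) { /* ... */ });
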
CallingConv ASTContext::getDefaultCallingConvention(bool IsVariadic,
                                                    bool IsCXXMethod,
                                                    bool IsBuiltin) const {
  // Pass through to the C++ ABI object
  if (IsCXXMethod)
    return ABI->getDefaultMethodCallConv(IsVariadic);

  // Builtins ignore the user-specified default calling convention and use the
  // target's default calling convention instead.
  if (!IsBuiltin) {
    switch (LangOpts.getDefaultCallingConv()) {
    case LangOptions::DCC_None:
      break;
    case LangOptions::DCC_CDecl:
      return CC_C;
    case LangOptions::DCC_FastCall:
      if (getTargetInfo().hasFeature("sse2") && !IsVariadic)
        return CC_X86FastCall;
      break;
    case LangOptions::DCC_StdCall:
      if (!IsVariadic)
        return CC_X86StdCall;
      break;
    case LangOptions::DCC_VectorCall:
      // __vectorcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86VectorCall;
      break;
    case LangOptions::DCC_RegCall:
      // __regcall cannot be applied to variadic functions.
      if (!IsVariadic)
        return CC_X86RegCall;
      break;
    }
  }
  return Target->getDefaultCallingConv();
}

bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
  // Pass through to the C++ ABI object
  return ABI->isNearlyEmpty(RD);
}

VTableContextBase *ASTContext::getVTableContext() {
  if (!VTContext.get()) {
    auto ABI = Target->getCXXABI();
    if (ABI.isMicrosoft())
      VTContext.reset(new MicrosoftVTableContext(*this));
    else {
      auto ComponentLayout = getLangOpts().RelativeCXXABIVTables
                                 ? ItaniumVTableContext::Relative
                                 : ItaniumVTableContext::Pointer;
      VTContext.reset(new ItaniumVTableContext(*this, ComponentLayout));
    }
  }
  return VTContext.get();
}
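
// Note on the ComponentLayout choice in getVTableContext above: when the
// relative vtable ABI is enabled in the LangOptions, vtable components are
// emitted as offsets relative to the vtable rather than as full pointers
// (an experimental layout); otherwise the classic pointer layout is used.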

MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
  if (!T)
    T = Target;
  switch (T->getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(*this, getDiagnostics());
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
  assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
         "Device mangle context does not support Microsoft mangling.");
  switch (T.getCXXABI().getKind()) {
  case TargetCXXABI::AppleARM64:
  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::XL:
    return ItaniumMangleContext::create(
        *this, getDiagnostics(),
        [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> {
          if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
            return RD->getDeviceLambdaManglingNumber();
          return llvm::None;
        });
  case TargetCXXABI::Microsoft:
    return MicrosoftMangleContext::create(*this, getDiagnostics());
  }
  llvm_unreachable("Unsupported ABI");
}

CXXABI::~CXXABI() = default;

size_t ASTContext::getSideTableAllocatedMemory() const {
  return ASTRecordLayouts.getMemorySize() +
         llvm::capacity_in_bytes(ObjCLayouts) +
         llvm::capacity_in_bytes(KeyFunctions) +
         llvm::capacity_in_bytes(ObjCImpls) +
         llvm::capacity_in_bytes(BlockVarCopyInits) +
         llvm::capacity_in_bytes(DeclAttrs) +
         llvm::capacity_in_bytes(TemplateOrInstantiation) +
         llvm::capacity_in_bytes(InstantiatedFromUsingDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl) +
         llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl) +
         llvm::capacity_in_bytes(OverriddenMethods) +
         llvm::capacity_in_bytes(Types) +
         llvm::capacity_in_bytes(VariableArrayTypes);
}

/// getIntTypeForBitwidth -
/// Returns the integer QualType matching the specified details:
/// bitwidth, signed/unsigned.
/// Returns an empty type if there is no appropriate target type.
QualType ASTContext::getIntTypeForBitwidth(unsigned DestWidth,
                                           unsigned Signed) const {
  TargetInfo::IntType Ty = getTargetInfo().getIntTypeByWidth(DestWidth, Signed);
  CanQualType QualTy = getFromTargetType(Ty);
  if (!QualTy && DestWidth == 128)
    return Signed ? Int128Ty : UnsignedInt128Ty;
  return QualTy;
}

/// getRealTypeForBitwidth -
/// Returns the floating-point QualType matching the specified bitwidth.
/// Returns an empty type if there is no appropriate target type.
QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
                                            FloatModeKind ExplicitType) const {
  FloatModeKind Ty =
      getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
  switch (Ty) {
  case FloatModeKind::Float:
    return FloatTy;
  case FloatModeKind::Double:
    return DoubleTy;
  case FloatModeKind::LongDouble:
    return LongDoubleTy;
  case FloatModeKind::Float128:
    return Float128Ty;
  case FloatModeKind::Ibm128:
    return Ibm128Ty;
  case FloatModeKind::NoFloat:
    return {};
  }

  llvm_unreachable("Unhandled FloatModeKind value");
}
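
// Usage sketch for getIntTypeForBitwidth (illustrative; getRealTypeForBitwidth
// behaves analogously): both return a null QualType when the target has no
// matching type.
//   QualType I32 = Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
//   if (I32.isNull()) { /* the target has no 32-bit integer type */ }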

void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
  if (Number > 1)
    MangleNumbers[ND] = Number;
}

unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
  auto I = MangleNumbers.find(ND);
  return I != MangleNumbers.end() ? I->second : 1;
}

void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
  if (Number > 1)
    StaticLocalNumbers[VD] = Number;
}

unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
  auto I = StaticLocalNumbers.find(VD);
  return I != StaticLocalNumbers.end() ? I->second : 1;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

MangleNumberingContext &
ASTContext::getManglingNumberContext(NeedExtraManglingDecl_t, const Decl *D) {
  assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
  std::unique_ptr<MangleNumberingContext> &MCtx =
      ExtraMangleNumberingContexts[D];
  if (!MCtx)
    MCtx = createMangleNumberingContext();
  return *MCtx;
}

std::unique_ptr<MangleNumberingContext>
ASTContext::createMangleNumberingContext() const {
  return ABI->createMangleNumberingContext();
}

const CXXConstructorDecl *
ASTContext::getCopyConstructorForExceptionObject(CXXRecordDecl *RD) {
  return ABI->getCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()));
}

void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
                                                      CXXConstructorDecl *CD) {
  return ABI->addCopyConstructorForExceptionObject(
      cast<CXXRecordDecl>(RD->getFirstDecl()),
      cast<CXXConstructorDecl>(CD->getFirstDecl()));
}

void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
                                                 TypedefNameDecl *DD) {
  return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
}

TypedefNameDecl *
ASTContext::getTypedefNameForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getTypedefNameForUnnamedTagDecl(TD);
}

void ASTContext::addDeclaratorForUnnamedTagDecl(TagDecl *TD,
                                                DeclaratorDecl *DD) {
  return ABI->addDeclaratorForUnnamedTagDecl(TD, DD);
}

DeclaratorDecl *ASTContext::getDeclaratorForUnnamedTagDecl(const TagDecl *TD) {
  return ABI->getDeclaratorForUnnamedTagDecl(TD);
}

void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
  ParamIndices[D] = index;
}

unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
  ParameterIndexTable::const_iterator I = ParamIndices.find(D);
  assert(I != ParamIndices.end() &&
         "ParmIndices lacks entry set by ParmVarDecl");
  return I->second;
}

QualType ASTContext::getStringLiteralArrayType(QualType EltTy,
                                               unsigned Length) const {
  // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
  if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
    EltTy = EltTy.withConst();

  EltTy = adjustStringLiteralBaseType(EltTy);

  // Get an array type for the string, according to C99 6.4.5. This includes
  // the null terminator character.
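  // For example (in C++, or in C with the ConstStrings language option),
  // "foo" gets the type 'const char[4]': three characters plus the null
  // terminator.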
  return getConstantArrayType(EltTy, llvm::APInt(32, Length + 1), nullptr,
                              ArrayType::Normal, /*IndexTypeQuals*/ 0);
}

StringLiteral *
ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
  StringLiteral *&Result = StringLiteralCache[Key];
  if (!Result)
    Result = StringLiteral::Create(
        *this, Key, StringLiteral::Ascii,
        /*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
        SourceLocation());
  return Result;
}

MSGuidDecl *
ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
  assert(MSGuidTagDecl && "building MS GUID without MS extensions?");

  llvm::FoldingSetNodeID ID;
  MSGuidDecl::Profile(ID, Parts);

  void *InsertPos;
  if (MSGuidDecl *Existing = MSGuidDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  QualType GUIDType = getMSGuidType().withConst();
  MSGuidDecl *New = MSGuidDecl::Create(*this, GUIDType, Parts);
  MSGuidDecls.InsertNode(New, InsertPos);
  return New;
}

TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
  assert(T->isRecordType() && "template param object of unexpected type");

  // C++ [temp.param]p8:
  //   [...] a static storage duration object of type 'const T' [...]
  T.addConst();

  llvm::FoldingSetNodeID ID;
  TemplateParamObjectDecl::Profile(ID, T, V);

  void *InsertPos;
  if (TemplateParamObjectDecl *Existing =
          TemplateParamObjectDecls.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;

  TemplateParamObjectDecl *New = TemplateParamObjectDecl::Create(*this, T, V);
  TemplateParamObjectDecls.InsertNode(New, InsertPos);
  return New;
}

bool ASTContext::AtomicUsesUnsupportedLibcall(const AtomicExpr *E) const {
  const llvm::Triple &T = getTargetInfo().getTriple();
  if (!T.isOSDarwin())
    return false;

  if (!(T.isiOS() && T.isOSVersionLT(7)) &&
      !(T.isMacOSX() && T.isOSVersionLT(10, 9)))
    return false;

  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  CharUnits sizeChars = getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTargetInfo().getMaxAtomicInlineWidth();
  return (Size != Align || toBits(sizeChars) > MaxInlineWidthInBits);
}

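// Rough reading of AtomicUsesUnsupportedLibcall above: on old Darwin
// deployment targets (iOS < 7, macOS < 10.9) an atomic operation is flagged
// when it cannot be inlined, i.e. when the value's size and alignment differ
// or its size exceeds the target's maximum inline atomic width, since the
// fallback atomic library calls are not available there.
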
bool
ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
                                const ObjCMethodDecl *MethodImpl) {
  // No point trying to match an unavailable/deprecated method.
  if (MethodDecl->hasAttr<UnavailableAttr>()
      || MethodDecl->hasAttr<DeprecatedAttr>())
    return false;
  if (MethodDecl->getObjCDeclQualifier() !=
      MethodImpl->getObjCDeclQualifier())
    return false;
  if (!hasSameType(MethodDecl->getReturnType(), MethodImpl->getReturnType()))
    return false;

  if (MethodDecl->param_size() != MethodImpl->param_size())
    return false;

  for (ObjCMethodDecl::param_const_iterator IM = MethodImpl->param_begin(),
       IF = MethodDecl->param_begin(), EM = MethodImpl->param_end(),
       EF = MethodDecl->param_end();
       IM != EM && IF != EF; ++IM, ++IF) {
    const ParmVarDecl *DeclVar = (*IF);
    const ParmVarDecl *ImplVar = (*IM);
    if (ImplVar->getObjCDeclQualifier() != DeclVar->getObjCDeclQualifier())
      return false;
    if (!hasSameType(DeclVar->getType(), ImplVar->getType()))
      return false;
  }

  return (MethodDecl->isVariadic() == MethodImpl->isVariadic());
}

uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
  LangAS AS;
  if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
    AS = LangAS::Default;
  else
    AS = QT->getPointeeType().getAddressSpace();

  return getTargetInfo().getNullPointerValue(AS);
}

unsigned ASTContext::getTargetAddressSpace(LangAS AS) const {
  if (isTargetAddressSpace(AS))
    return toTargetAddressSpace(AS);
  else
    return (*AddrSpaceMap)[(unsigned)AS];
}

QualType ASTContext::getCorrespondingSaturatedType(QualType Ty) const {
  assert(Ty->isFixedPointType());

  if (Ty->isSaturatedFixedPointType()) return Ty;

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
    return SatShortAccumTy;
  case BuiltinType::Accum:
    return SatAccumTy;
  case BuiltinType::LongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortAccum:
    return SatUnsignedShortAccumTy;
  case BuiltinType::UAccum:
    return SatUnsignedAccumTy;
  case BuiltinType::ULongAccum:
    return SatUnsignedLongAccumTy;
  case BuiltinType::ShortFract:
    return SatShortFractTy;
  case BuiltinType::Fract:
    return SatFractTy;
  case BuiltinType::LongFract:
    return SatLongFractTy;
  case BuiltinType::UShortFract:
    return SatUnsignedShortFractTy;
  case BuiltinType::UFract:
    return SatUnsignedFractTy;
  case BuiltinType::ULongFract:
    return SatUnsignedLongFractTy;
  }
}
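
// Examples for getCorrespondingSaturatedType above: the saturated counterpart
// of 'short _Accum' is '_Sat short _Accum', and the saturated counterpart of
// 'unsigned long _Fract' is '_Sat unsigned long _Fract'.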

LangAS ASTContext::getLangASForBuiltinAddressSpace(unsigned AS) const {
  if (LangOpts.OpenCL)
    return getTargetInfo().getOpenCLBuiltinAddressSpace(AS);

  if (LangOpts.CUDA)
    return getTargetInfo().getCUDABuiltinAddressSpace(AS);

  return getLangASFromTargetAS(AS);
}

// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::ValueType
clang::LazyGenerationalUpdatePtr<
    const Decl *, Decl *, &ExternalASTSource::CompleteRedeclChain>::makeValue(
        const clang::ASTContext &Ctx, Decl *Value);

unsigned char ASTContext::getFixedPointScale(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumScale();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumScale();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumScale();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumScale();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumScale();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumScale();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
    return Target.getShortFractScale();
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
    return Target.getFractScale();
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
    return Target.getLongFractScale();
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
    return Target.getUnsignedShortFractScale();
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
    return Target.getUnsignedFractScale();
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return Target.getUnsignedLongFractScale();
  }
}

unsigned char ASTContext::getFixedPointIBits(QualType Ty) const {
  assert(Ty->isFixedPointType());

  const TargetInfo &Target = getTargetInfo();
  switch (Ty->castAs<BuiltinType>()->getKind()) {
  default:
    llvm_unreachable("Not a fixed point type!");
  case BuiltinType::ShortAccum:
  case BuiltinType::SatShortAccum:
    return Target.getShortAccumIBits();
  case BuiltinType::Accum:
  case BuiltinType::SatAccum:
    return Target.getAccumIBits();
  case BuiltinType::LongAccum:
  case BuiltinType::SatLongAccum:
    return Target.getLongAccumIBits();
  case BuiltinType::UShortAccum:
  case BuiltinType::SatUShortAccum:
    return Target.getUnsignedShortAccumIBits();
  case BuiltinType::UAccum:
  case BuiltinType::SatUAccum:
    return Target.getUnsignedAccumIBits();
  case BuiltinType::ULongAccum:
  case BuiltinType::SatULongAccum:
    return Target.getUnsignedLongAccumIBits();
  case BuiltinType::ShortFract:
  case BuiltinType::SatShortFract:
  case BuiltinType::Fract:
  case BuiltinType::SatFract:
  case BuiltinType::LongFract:
  case BuiltinType::SatLongFract:
  case BuiltinType::UShortFract:
  case BuiltinType::SatUShortFract:
  case BuiltinType::UFract:
  case BuiltinType::SatUFract:
  case BuiltinType::ULongFract:
  case BuiltinType::SatULongFract:
    return 0;
  }
}

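/// Map a fixed-point or integer type to its llvm::FixedPointSemantics.
/// Plain integer types are treated as scale-0 values; for example (assuming a
/// 32-bit 'int'), 'int' becomes a signed 32-bit semantic with scale 0 and no
/// saturation.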
llvm::FixedPointSemantics
ASTContext::getFixedPointSemantics(QualType Ty) const {
  assert((Ty->isFixedPointType() || Ty->isIntegerType()) &&
         "Can only get the fixed point semantics for a "
         "fixed point or integer type.");
  if (Ty->isIntegerType())
    return llvm::FixedPointSemantics::GetIntegerSemantics(
        getIntWidth(Ty), Ty->isSignedIntegerType());

  bool isSigned = Ty->isSignedFixedPointType();
  return llvm::FixedPointSemantics(
      static_cast<unsigned>(getTypeSize(Ty)), getFixedPointScale(Ty), isSigned,
      Ty->isSaturatedFixedPointType(),
      !isSigned && getTargetInfo().doUnsignedFixedPointTypesHavePadding());
}

llvm::APFixedPoint ASTContext::getFixedPointMax(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMax(getFixedPointSemantics(Ty));
}

llvm::APFixedPoint ASTContext::getFixedPointMin(QualType Ty) const {
  assert(Ty->isFixedPointType());
  return llvm::APFixedPoint::getMin(getFixedPointSemantics(Ty));
}

QualType ASTContext::getCorrespondingSignedFixedPointType(QualType Ty) const {
  assert(Ty->isUnsignedFixedPointType() &&
         "Expected unsigned fixed point type");

  switch (Ty->castAs<BuiltinType>()->getKind()) {
  case BuiltinType::UShortAccum:
    return ShortAccumTy;
  case BuiltinType::UAccum:
    return AccumTy;
  case BuiltinType::ULongAccum:
    return LongAccumTy;
  case BuiltinType::SatUShortAccum:
    return SatShortAccumTy;
  case BuiltinType::SatUAccum:
    return SatAccumTy;
  case BuiltinType::SatULongAccum:
    return SatLongAccumTy;
  case BuiltinType::UShortFract:
    return ShortFractTy;
  case BuiltinType::UFract:
    return FractTy;
  case BuiltinType::ULongFract:
    return LongFractTy;
  case BuiltinType::SatUShortFract:
    return SatShortFractTy;
  case BuiltinType::SatUFract:
    return SatFractTy;
  case BuiltinType::SatULongFract:
    return SatLongFractTy;
  default:
    llvm_unreachable("Unexpected unsigned fixed point type");
  }
}

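// Examples for getCorrespondingSignedFixedPointType above:
// 'unsigned short _Accum' maps to 'short _Accum', and
// '_Sat unsigned long _Fract' maps to '_Sat long _Fract'.
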
ParsedTargetAttr
ASTContext::filterFunctionTargetAttrs(const TargetAttr *TD) const {
  assert(TD != nullptr);
  ParsedTargetAttr ParsedAttr = TD->parse();

  llvm::erase_if(ParsedAttr.Features, [&](const std::string &Feat) {
    return !Target->isValidFeatureName(StringRef{Feat}.substr(1));
  });
  return ParsedAttr;
}

void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       const FunctionDecl *FD) const {
  if (FD)
    getFunctionFeatureMap(FeatureMap, GlobalDecl().getWithDecl(FD));
  else
    Target->initFeatureMap(FeatureMap, getDiagnostics(),
                           Target->getTargetOpts().CPU,
                           Target->getTargetOpts().Features);
}

// Fills in the supplied string map with the set of target features for the
// passed in function.
void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
                                       GlobalDecl GD) const {
  StringRef TargetCPU = Target->getTargetOpts().CPU;
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  if (const auto *TD = FD->getAttr<TargetAttr>()) {
    ParsedTargetAttr ParsedAttr = filterFunctionTargetAttrs(TD);

    // Make a copy of the features as passed on the command line into the
    // beginning of the additional features from the function to override.
    ParsedAttr.Features.insert(
        ParsedAttr.Features.begin(),
        Target->getTargetOpts().FeaturesAsWritten.begin(),
        Target->getTargetOpts().FeaturesAsWritten.end());

    if (ParsedAttr.Architecture != "" &&
        Target->isValidCPUName(ParsedAttr.Architecture))
      TargetCPU = ParsedAttr.Architecture;

    // Now populate the feature map, first with the TargetCPU, which is either
    // the default or a new one from the target attribute string. Then we'll
    // use the passed-in features (FeaturesAsWritten) along with the new ones
    // from the attribute.
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU,
                           ParsedAttr.Features);
  } else if (const auto *SD = FD->getAttr<CPUSpecificAttr>()) {
    llvm::SmallVector<StringRef, 32> FeaturesTmp;
    Target->getCPUSpecificCPUDispatchFeatures(
        SD->getCPUName(GD.getMultiVersionIndex())->getName(), FeaturesTmp);
    std::vector<std::string> Features(FeaturesTmp.begin(), FeaturesTmp.end());
    Features.insert(Features.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.begin(),
                    Target->getTargetOpts().FeaturesAsWritten.end());
    Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
  } else {
    FeatureMap = Target->getTargetOpts().FeatureMap;
  }
}

OMPTraitInfo &ASTContext::getNewOMPTraitInfo() {
  OMPTraitInfoVector.emplace_back(new OMPTraitInfo());
  return *OMPTraitInfoVector.back();
}

const StreamingDiagnostic &clang::
operator<<(const StreamingDiagnostic &DB,
           const ASTContext::SectionInfo &Section) {
  if (Section.Decl)
    return DB << Section.Decl;
  return DB << "a prior #pragma section";
}

bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
  bool IsStaticVar =
      isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
  bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
                              !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
                             (D->hasAttr<CUDAConstantAttr>() &&
                              !D->getAttr<CUDAConstantAttr>()->isImplicit());
  // CUDA/HIP: static managed variables need to be externalized since they are
  // emitted as declarations in IR and therefore cannot have internal linkage.
  return IsStaticVar &&
         (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar);
}

bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
  return mayExternalizeStaticVar(D) &&
         (D->hasAttr<HIPManagedAttr>() ||
          CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}

StringRef ASTContext::getCUIDHash() const {
  if (!CUIDHash.empty())
    return CUIDHash;
  if (LangOpts.CUID.empty())
    return StringRef();
  CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
  return CUIDHash;
}