1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This provides C++ code generation targeting the Itanium C++ ABI. The class 10 // in this file generates structures that follow the Itanium C++ ABI, which is 11 // documented at: 12 // https://itanium-cxx-abi.github.io/cxx-abi/abi.html 13 // https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html 14 // 15 // It also supports the closely-related ARM ABI, documented at: 16 // https://developer.arm.com/documentation/ihi0041/g/ 17 // 18 //===----------------------------------------------------------------------===// 19 20 #include "CGCXXABI.h" 21 #include "CGCleanup.h" 22 #include "CGRecordLayout.h" 23 #include "CGVTables.h" 24 #include "CodeGenFunction.h" 25 #include "CodeGenModule.h" 26 #include "TargetInfo.h" 27 #include "clang/AST/Attr.h" 28 #include "clang/AST/Mangle.h" 29 #include "clang/AST/StmtCXX.h" 30 #include "clang/AST/Type.h" 31 #include "clang/CodeGen/ConstantInitBuilder.h" 32 #include "llvm/IR/DataLayout.h" 33 #include "llvm/IR/GlobalValue.h" 34 #include "llvm/IR/Instructions.h" 35 #include "llvm/IR/Intrinsics.h" 36 #include "llvm/IR/Value.h" 37 #include "llvm/Support/ScopedPrinter.h" 38 39 #include <optional> 40 41 using namespace clang; 42 using namespace CodeGen; 43 44 namespace { 45 class ItaniumCXXABI : public CodeGen::CGCXXABI { 46 /// VTables - All the vtables which have been defined. 47 llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables; 48 49 /// All the thread wrapper functions that have been used. 50 llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8> 51 ThreadWrappers; 52 53 protected: 54 bool UseARMMethodPtrABI; 55 bool UseARMGuardVarABI; 56 bool Use32BitVTableOffsetABI; 57 58 ItaniumMangleContext &getMangleContext() { 59 return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext()); 60 } 61 62 public: 63 ItaniumCXXABI(CodeGen::CodeGenModule &CGM, 64 bool UseARMMethodPtrABI = false, 65 bool UseARMGuardVarABI = false) : 66 CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI), 67 UseARMGuardVarABI(UseARMGuardVarABI), 68 Use32BitVTableOffsetABI(false) { } 69 70 bool classifyReturnType(CGFunctionInfo &FI) const override; 71 72 RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override { 73 // If C++ prohibits us from making a copy, pass by address. 74 if (!RD->canPassInRegisters()) 75 return RAA_Indirect; 76 return RAA_Default; 77 } 78 79 bool isThisCompleteObject(GlobalDecl GD) const override { 80 // The Itanium ABI has separate complete-object vs. base-object 81 // variants of both constructors and destructors. 
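// For example, given 'struct A { A(); virtual ~A(); };', the Itanium
// mangler emits _ZN1AC1Ev (complete) and _ZN1AC2Ev (base) for the
// constructor, and _ZN1AD1Ev (complete), _ZN1AD2Ev (base), and
// _ZN1AD0Ev (deleting) for the destructor.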
82 if (isa<CXXDestructorDecl>(GD.getDecl())) {
83 switch (GD.getDtorType()) {
84 case Dtor_Complete:
85 case Dtor_Deleting:
86 return true;
87
88 case Dtor_Base:
89 return false;
90
91 case Dtor_Comdat:
92 llvm_unreachable("emitting dtor comdat as function?");
93 }
94 llvm_unreachable("bad dtor kind");
95 }
96 if (isa<CXXConstructorDecl>(GD.getDecl())) {
97 switch (GD.getCtorType()) {
98 case Ctor_Complete:
99 return true;
100
101 case Ctor_Base:
102 return false;
103
104 case Ctor_CopyingClosure:
105 case Ctor_DefaultClosure:
106 llvm_unreachable("closure ctors in Itanium ABI?");
107
108 case Ctor_Comdat:
109 llvm_unreachable("emitting ctor comdat as function?");
110 }
111 llvm_unreachable("bad ctor kind");
112 }
113
114 // No other kinds.
115 return false;
116 }
117
118 bool isZeroInitializable(const MemberPointerType *MPT) override;
119
120 llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
121
122 CGCallee
123 EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
124 const Expr *E,
125 Address This,
126 llvm::Value *&ThisPtrForCall,
127 llvm::Value *MemFnPtr,
128 const MemberPointerType *MPT) override;
129
130 llvm::Value *
131 EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
132 Address Base,
133 llvm::Value *MemPtr,
134 const MemberPointerType *MPT) override;
135
136 llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
137 const CastExpr *E,
138 llvm::Value *Src) override;
139 llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
140 llvm::Constant *Src) override;
141
142 llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
143
144 llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
145 llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
146 CharUnits offset) override;
147 llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
148 llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
149 CharUnits ThisAdjustment);
150
151 llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
152 llvm::Value *L, llvm::Value *R,
153 const MemberPointerType *MPT,
154 bool Inequality) override;
155
156 llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
157 llvm::Value *Addr,
158 const MemberPointerType *MPT) override;
159
160 void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
161 Address Ptr, QualType ElementType,
162 const CXXDestructorDecl *Dtor) override;
163
164 void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
165 void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
166
167 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
168
169 llvm::CallInst *
170 emitTerminateForUnexpectedException(CodeGenFunction &CGF,
171 llvm::Value *Exn) override;
172
173 void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
174 llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
175 CatchTypeInfo
176 getAddrOfCXXCatchHandlerType(QualType Ty,
177 QualType CatchHandlerType) override {
178 return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
179 }
180
181 bool shouldTypeidBeNullChecked(QualType SrcRecordTy) override;
182 void EmitBadTypeidCall(CodeGenFunction &CGF) override;
183 llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
184 Address ThisPtr,
185 llvm::Type *StdTypeInfoPtrTy) override;
186
187 bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
188 QualType SrcRecordTy) override;
189
190 /// Determine whether we know that all
instances of type RecordTy will have
191 /// the same vtable pointer value, one that is distinct from all other vtable
192 /// pointers. While this is required by the Itanium ABI, it doesn't happen in
193 /// practice in some cases due to language extensions.
194 bool hasUniqueVTablePointer(QualType RecordTy) {
195 const CXXRecordDecl *RD = RecordTy->getAsCXXRecordDecl();
196
197 // Under -fapple-kext, multiple definitions of the same vtable may be
198 // emitted.
199 if (!CGM.getCodeGenOpts().AssumeUniqueVTables ||
200 getContext().getLangOpts().AppleKext)
201 return false;
202
203 // If the type_info* would be null, the vtable might be merged with that of
204 // another type.
205 if (!CGM.shouldEmitRTTI())
206 return false;
207
208 // If there's only one definition of the vtable in the program, it has a
209 // unique address.
210 if (!llvm::GlobalValue::isWeakForLinker(CGM.getVTableLinkage(RD)))
211 return true;
212
213 // Even if there are multiple definitions of the vtable, they are required
214 // by the ABI to use the same symbol name, so should be merged at load
215 // time. However, if the class has hidden visibility, there can be
216 // different versions of the class in different modules, and the ABI
217 // library might treat them as being the same.
218 if (CGM.GetLLVMVisibility(RD->getVisibility()) !=
219 llvm::GlobalValue::DefaultVisibility)
220 return false;
221
222 return true;
223 }
224
225 bool shouldEmitExactDynamicCast(QualType DestRecordTy) override {
226 return hasUniqueVTablePointer(DestRecordTy);
227 }
228
229 llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
230 QualType SrcRecordTy, QualType DestTy,
231 QualType DestRecordTy,
232 llvm::BasicBlock *CastEnd) override;
233
234 llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
235 QualType SrcRecordTy, QualType DestTy,
236 QualType DestRecordTy,
237 llvm::BasicBlock *CastSuccess,
238 llvm::BasicBlock *CastFail) override;
239
240 llvm::Value *emitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
241 QualType SrcRecordTy) override;
242
243 bool EmitBadCastCall(CodeGenFunction &CGF) override;
244
245 llvm::Value *
246 GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
247 const CXXRecordDecl *ClassDecl,
248 const CXXRecordDecl *BaseClassDecl) override;
249
250 void EmitCXXConstructors(const CXXConstructorDecl *D) override;
251
252 AddedStructorArgCounts
253 buildStructorSignature(GlobalDecl GD,
254 SmallVectorImpl<CanQualType> &ArgTys) override;
255
256 bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
257 CXXDtorType DT) const override {
258 // Itanium does not emit any destructor variant as an inline thunk.
259 // Delegating may occur as an optimization, but all variants are either
260 // emitted with external linkage or as linkonce if they are inline and used.
261 return false; 262 } 263 264 void EmitCXXDestructors(const CXXDestructorDecl *D) override; 265 266 void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, 267 FunctionArgList &Params) override; 268 269 void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override; 270 271 AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF, 272 const CXXConstructorDecl *D, 273 CXXCtorType Type, 274 bool ForVirtualBase, 275 bool Delegating) override; 276 277 llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF, 278 const CXXDestructorDecl *DD, 279 CXXDtorType Type, 280 bool ForVirtualBase, 281 bool Delegating) override; 282 283 void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, 284 CXXDtorType Type, bool ForVirtualBase, 285 bool Delegating, Address This, 286 QualType ThisTy) override; 287 288 void emitVTableDefinitions(CodeGenVTables &CGVT, 289 const CXXRecordDecl *RD) override; 290 291 bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF, 292 CodeGenFunction::VPtr Vptr) override; 293 294 bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override { 295 return true; 296 } 297 298 llvm::Constant * 299 getVTableAddressPoint(BaseSubobject Base, 300 const CXXRecordDecl *VTableClass) override; 301 302 llvm::Value *getVTableAddressPointInStructor( 303 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 304 BaseSubobject Base, const CXXRecordDecl *NearestVBase) override; 305 306 llvm::Value *getVTableAddressPointInStructorWithVTT( 307 CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, 308 BaseSubobject Base, const CXXRecordDecl *NearestVBase); 309 310 llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, 311 CharUnits VPtrOffset) override; 312 313 CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, 314 Address This, llvm::Type *Ty, 315 SourceLocation Loc) override; 316 317 llvm::Value * 318 EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, 319 CXXDtorType DtorType, Address This, 320 DeleteOrMemberCallExpr E, 321 llvm::CallBase **CallOrInvoke) override; 322 323 void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; 324 325 bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override; 326 bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const; 327 328 void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, 329 bool ReturnAdjustment) override { 330 // Allow inlining of thunks by emitting them with available_externally 331 // linkage together with vtables when needed. 
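// (An available_externally definition exposes the thunk body to the
// optimizer for inlining but is discarded by the backend, so this never
// changes which symbol the program actually links against.)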
332 if (ForVTable && !Thunk->hasLocalLinkage()) 333 Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); 334 CGM.setGVProperties(Thunk, GD); 335 } 336 337 bool exportThunk() override { return true; } 338 339 llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This, 340 const CXXRecordDecl *UnadjustedThisClass, 341 const ThunkInfo &TI) override; 342 343 llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret, 344 const CXXRecordDecl *UnadjustedRetClass, 345 const ReturnAdjustment &RA) override; 346 347 size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, 348 FunctionArgList &Args) const override { 349 assert(!Args.empty() && "expected the arglist to not be empty!"); 350 return Args.size() - 1; 351 } 352 353 StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; } 354 StringRef GetDeletedVirtualCallName() override 355 { return "__cxa_deleted_virtual"; } 356 357 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 358 Address InitializeArrayCookie(CodeGenFunction &CGF, 359 Address NewPtr, 360 llvm::Value *NumElements, 361 const CXXNewExpr *expr, 362 QualType ElementType) override; 363 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, 364 Address allocPtr, 365 CharUnits cookieSize) override; 366 367 void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, 368 llvm::GlobalVariable *DeclPtr, 369 bool PerformInit) override; 370 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 371 llvm::FunctionCallee dtor, 372 llvm::Constant *addr) override; 373 374 llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD, 375 llvm::Value *Val); 376 void EmitThreadLocalInitFuncs( 377 CodeGenModule &CGM, 378 ArrayRef<const VarDecl *> CXXThreadLocals, 379 ArrayRef<llvm::Function *> CXXThreadLocalInits, 380 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override; 381 382 bool usesThreadWrapperFunction(const VarDecl *VD) const override { 383 return !isEmittedWithConstantInitializer(VD) || 384 mayNeedDestruction(VD); 385 } 386 LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, 387 QualType LValType) override; 388 389 bool NeedsVTTParameter(GlobalDecl GD) override; 390 391 llvm::Constant * 392 getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD); 393 394 /**************************** RTTI Uniqueness ******************************/ 395 396 protected: 397 /// Returns true if the ABI requires RTTI type_info objects to be unique 398 /// across a program. 399 virtual bool shouldRTTIBeUnique() const { return true; } 400 401 public: 402 /// What sort of unique-RTTI behavior should we use? 403 enum RTTIUniquenessKind { 404 /// We are guaranteeing, or need to guarantee, that the RTTI string 405 /// is unique. 406 RUK_Unique, 407 408 /// We are not guaranteeing uniqueness for the RTTI string, so we 409 /// can demote to hidden visibility but must use string comparisons. 410 RUK_NonUniqueHidden, 411 412 /// We are not guaranteeing uniqueness for the RTTI string, so we 413 /// have to use string comparisons, but we also have to emit it with 414 /// non-hidden visibility. 415 RUK_NonUniqueVisible 416 }; 417 418 /// Return the required visibility status for the given type and linkage in 419 /// the current ABI. 
420 RTTIUniquenessKind 421 classifyRTTIUniqueness(QualType CanTy, 422 llvm::GlobalValue::LinkageTypes Linkage) const; 423 friend class ItaniumRTTIBuilder; 424 425 void emitCXXStructor(GlobalDecl GD) override; 426 427 std::pair<llvm::Value *, const CXXRecordDecl *> 428 LoadVTablePtr(CodeGenFunction &CGF, Address This, 429 const CXXRecordDecl *RD) override; 430 431 private: 432 llvm::Constant * 433 getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD); 434 435 bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const { 436 const auto &VtableLayout = 437 CGM.getItaniumVTableContext().getVTableLayout(RD); 438 439 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 440 // Skip empty slot. 441 if (!VtableComponent.isUsedFunctionPointerKind()) 442 continue; 443 444 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 445 const FunctionDecl *FD = Method->getDefinition(); 446 const bool IsInlined = 447 Method->getCanonicalDecl()->isInlined() || (FD && FD->isInlined()); 448 if (!IsInlined) 449 continue; 450 451 StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl()); 452 auto *Entry = CGM.GetGlobalValue(Name); 453 // This checks if virtual inline function has already been emitted. 454 // Note that it is possible that this inline function would be emitted 455 // after trying to emit vtable speculatively. Because of this we do 456 // an extra pass after emitting all deferred vtables to find and emit 457 // these vtables opportunistically. 458 if (!Entry || Entry->isDeclaration()) 459 return true; 460 } 461 return false; 462 } 463 464 bool isVTableHidden(const CXXRecordDecl *RD) const { 465 const auto &VtableLayout = 466 CGM.getItaniumVTableContext().getVTableLayout(RD); 467 468 for (const auto &VtableComponent : VtableLayout.vtable_components()) { 469 if (VtableComponent.isRTTIKind()) { 470 const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl(); 471 if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility) 472 return true; 473 } else if (VtableComponent.isUsedFunctionPointerKind()) { 474 const CXXMethodDecl *Method = VtableComponent.getFunctionDecl(); 475 if (Method->getVisibility() == Visibility::HiddenVisibility && 476 !Method->isDefined()) 477 return true; 478 } 479 } 480 return false; 481 } 482 }; 483 484 class ARMCXXABI : public ItaniumCXXABI { 485 public: 486 ARMCXXABI(CodeGen::CodeGenModule &CGM) : 487 ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 488 /*UseARMGuardVarABI=*/true) {} 489 490 bool constructorsAndDestructorsReturnThis() const override { return true; } 491 492 void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, 493 QualType ResTy) override; 494 495 CharUnits getArrayCookieSizeImpl(QualType elementType) override; 496 Address InitializeArrayCookie(CodeGenFunction &CGF, 497 Address NewPtr, 498 llvm::Value *NumElements, 499 const CXXNewExpr *expr, 500 QualType ElementType) override; 501 llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr, 502 CharUnits cookieSize) override; 503 }; 504 505 class AppleARM64CXXABI : public ARMCXXABI { 506 public: 507 AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) { 508 Use32BitVTableOffsetABI = true; 509 } 510 511 // ARM64 libraries are prepared for non-unique RTTI. 
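// With non-unique RTTI, the runtime cannot rely on type_info pointer
// identity and must instead compare the mangled type name strings (see
// RUK_NonUniqueHidden above).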
512 bool shouldRTTIBeUnique() const override { return false; } 513 }; 514 515 class FuchsiaCXXABI final : public ItaniumCXXABI { 516 public: 517 explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM) 518 : ItaniumCXXABI(CGM) {} 519 520 private: 521 bool constructorsAndDestructorsReturnThis() const override { return true; } 522 }; 523 524 class WebAssemblyCXXABI final : public ItaniumCXXABI { 525 public: 526 explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM) 527 : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 528 /*UseARMGuardVarABI=*/true) {} 529 void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; 530 llvm::CallInst * 531 emitTerminateForUnexpectedException(CodeGenFunction &CGF, 532 llvm::Value *Exn) override; 533 534 private: 535 bool constructorsAndDestructorsReturnThis() const override { return true; } 536 bool canCallMismatchedFunctionType() const override { return false; } 537 }; 538 539 class XLCXXABI final : public ItaniumCXXABI { 540 public: 541 explicit XLCXXABI(CodeGen::CodeGenModule &CGM) 542 : ItaniumCXXABI(CGM) {} 543 544 void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 545 llvm::FunctionCallee dtor, 546 llvm::Constant *addr) override; 547 548 bool useSinitAndSterm() const override { return true; } 549 550 private: 551 void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub, 552 llvm::Constant *addr); 553 }; 554 } 555 556 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) { 557 switch (CGM.getContext().getCXXABIKind()) { 558 // For IR-generation purposes, there's no significant difference 559 // between the ARM and iOS ABIs. 560 case TargetCXXABI::GenericARM: 561 case TargetCXXABI::iOS: 562 case TargetCXXABI::WatchOS: 563 return new ARMCXXABI(CGM); 564 565 case TargetCXXABI::AppleARM64: 566 return new AppleARM64CXXABI(CGM); 567 568 case TargetCXXABI::Fuchsia: 569 return new FuchsiaCXXABI(CGM); 570 571 // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't 572 // include the other 32-bit ARM oddities: constructor/destructor return values 573 // and array cookies. 
574 case TargetCXXABI::GenericAArch64: 575 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true, 576 /*UseARMGuardVarABI=*/true); 577 578 case TargetCXXABI::GenericMIPS: 579 return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true); 580 581 case TargetCXXABI::WebAssembly: 582 return new WebAssemblyCXXABI(CGM); 583 584 case TargetCXXABI::XL: 585 return new XLCXXABI(CGM); 586 587 case TargetCXXABI::GenericItanium: 588 return new ItaniumCXXABI(CGM); 589 590 case TargetCXXABI::Microsoft: 591 llvm_unreachable("Microsoft ABI is not Itanium-based"); 592 } 593 llvm_unreachable("bad ABI kind"); 594 } 595 596 llvm::Type * 597 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { 598 if (MPT->isMemberDataPointer()) 599 return CGM.PtrDiffTy; 600 return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy); 601 } 602 603 /// In the Itanium and ARM ABIs, method pointers have the form: 604 /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr; 605 /// 606 /// In the Itanium ABI: 607 /// - method pointers are virtual if (memptr.ptr & 1) is nonzero 608 /// - the this-adjustment is (memptr.adj) 609 /// - the virtual offset is (memptr.ptr - 1) 610 /// 611 /// In the ARM ABI: 612 /// - method pointers are virtual if (memptr.adj & 1) is nonzero 613 /// - the this-adjustment is (memptr.adj >> 1) 614 /// - the virtual offset is (memptr.ptr) 615 /// ARM uses 'adj' for the virtual flag because Thumb functions 616 /// may be only single-byte aligned. 617 /// 618 /// If the member is virtual, the adjusted 'this' pointer points 619 /// to a vtable pointer from which the virtual offset is applied. 620 /// 621 /// If the member is non-virtual, memptr.ptr is the address of 622 /// the function to call. 623 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( 624 CodeGenFunction &CGF, const Expr *E, Address ThisAddr, 625 llvm::Value *&ThisPtrForCall, 626 llvm::Value *MemFnPtr, const MemberPointerType *MPT) { 627 CGBuilderTy &Builder = CGF.Builder; 628 629 const FunctionProtoType *FPT = 630 MPT->getPointeeType()->castAs<FunctionProtoType>(); 631 auto *RD = 632 cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl()); 633 634 llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); 635 636 llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual"); 637 llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual"); 638 llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end"); 639 640 // Extract memptr.adj, which is in the second field. 641 llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj"); 642 643 // Compute the true adjustment. 644 llvm::Value *Adj = RawAdj; 645 if (UseARMMethodPtrABI) 646 Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted"); 647 648 // Apply the adjustment and cast back to the original struct type 649 // for consistency. 650 llvm::Value *This = ThisAddr.emitRawPointer(CGF); 651 This = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), This, Adj); 652 ThisPtrForCall = This; 653 654 // Load the function pointer. 655 llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr"); 656 657 // If the LSB in the function pointer is 1, the function pointer points to 658 // a virtual function. 
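// A minimal illustration, assuming 'f' occupies the first vtable slot
// (offset 0) and needs no this-adjustment:
//   struct A { virtual void f(); };
//   void (A::*p)() = &A::f;
// Itanium encodes p as { ptr = 0 + 1, adj = 0 }; ARM encodes it as
// { ptr = 0, adj = (0 << 1) | 1 }, keeping 'ptr' free for code
// addresses with no spare low bit (e.g. Thumb).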
659 llvm::Value *IsVirtual; 660 if (UseARMMethodPtrABI) 661 IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1); 662 else 663 IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1); 664 IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual"); 665 Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual); 666 667 // In the virtual path, the adjustment left 'This' pointing to the 668 // vtable of the correct base subobject. The "function pointer" is an 669 // offset within the vtable (+1 for the virtual flag on non-ARM). 670 CGF.EmitBlock(FnVirtual); 671 672 // Cast the adjusted this to a pointer to vtable pointer and load. 673 llvm::Type *VTableTy = CGF.CGM.GlobalsInt8PtrTy; 674 CharUnits VTablePtrAlign = 675 CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD, 676 CGF.getPointerAlign()); 677 llvm::Value *VTable = CGF.GetVTablePtr( 678 Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD); 679 680 // Apply the offset. 681 // On ARM64, to reserve extra space in virtual member function pointers, 682 // we only pay attention to the low 32 bits of the offset. 683 llvm::Value *VTableOffset = FnAsInt; 684 if (!UseARMMethodPtrABI) 685 VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1); 686 if (Use32BitVTableOffsetABI) { 687 VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty); 688 VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy); 689 } 690 691 // Check the address of the function pointer if CFI on member function 692 // pointers is enabled. 693 llvm::Constant *CheckSourceLocation; 694 llvm::Constant *CheckTypeDesc; 695 bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) && 696 CGM.HasHiddenLTOVisibility(RD); 697 bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination && 698 CGM.HasHiddenLTOVisibility(RD); 699 bool ShouldEmitWPDInfo = 700 CGM.getCodeGenOpts().WholeProgramVTables && 701 // Don't insert type tests if we are forcing public visibility. 702 !CGM.AlwaysHasLTOVisibilityPublic(RD); 703 llvm::Value *VirtualFn = nullptr; 704 705 { 706 CodeGenFunction::SanitizerScope SanScope(&CGF); 707 llvm::Value *TypeId = nullptr; 708 llvm::Value *CheckResult = nullptr; 709 710 if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) { 711 // If doing CFI, VFE or WPD, we will need the metadata node to check 712 // against. 713 llvm::Metadata *MD = 714 CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0)); 715 TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD); 716 } 717 718 if (ShouldEmitVFEInfo) { 719 llvm::Value *VFPAddr = 720 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset); 721 722 // If doing VFE, load from the vtable with a type.checked.load intrinsic 723 // call. Note that we use the GEP to calculate the address to load from 724 // and pass 0 as the offset to the intrinsic. This is because every 725 // vtable slot of the correct type is marked with matching metadata, and 726 // we know that the load must be from one of these slots. 727 llvm::Value *CheckedLoad = Builder.CreateCall( 728 CGM.getIntrinsic(llvm::Intrinsic::type_checked_load), 729 {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId}); 730 CheckResult = Builder.CreateExtractValue(CheckedLoad, 1); 731 VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0); 732 } else { 733 // When not doing VFE, emit a normal load, as it allows more 734 // optimisations than type.checked.load. 
735 if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
736 llvm::Value *VFPAddr =
737 Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
738 llvm::Intrinsic::ID IID = CGM.HasHiddenLTOVisibility(RD)
739 ? llvm::Intrinsic::type_test
740 : llvm::Intrinsic::public_type_test;
741
742 CheckResult =
743 Builder.CreateCall(CGM.getIntrinsic(IID), {VFPAddr, TypeId});
744 }
745
746 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
747 VirtualFn = CGF.Builder.CreateCall(
748 CGM.getIntrinsic(llvm::Intrinsic::load_relative,
749 {VTableOffset->getType()}),
750 {VTable, VTableOffset});
751 } else {
752 llvm::Value *VFPAddr =
753 CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
754 VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
755 CGF.getPointerAlign(),
756 "memptr.virtualfn");
757 }
758 }
759 assert(VirtualFn && "Virtual function pointer not created!");
760 assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
761 CheckResult) &&
762 "Check result required but not created!");
763
764 if (ShouldEmitCFICheck) {
765 // If doing CFI, emit the check.
766 CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
767 CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
768 llvm::Constant *StaticData[] = {
769 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
770 CheckSourceLocation,
771 CheckTypeDesc,
772 };
773
774 if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
775 CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
776 } else {
777 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
778 CGM.getLLVMContext(),
779 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
780 llvm::Value *ValidVtable = Builder.CreateCall(
781 CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
782 CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::SO_CFIMFCall),
783 SanitizerHandler::CFICheckFail, StaticData,
784 {VTable, ValidVtable});
785 }
786
787 FnVirtual = Builder.GetInsertBlock();
788 }
789 } // End of sanitizer scope
790
791 CGF.EmitBranch(FnEnd);
792
793 // In the non-virtual path, the function pointer is actually a
794 // function pointer.
795 CGF.EmitBlock(FnNonVirtual);
796 llvm::Value *NonVirtualFn =
797 Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
798
799 // Check the function pointer if CFI on member function pointers is enabled.
800 if (ShouldEmitCFICheck) { 801 CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl(); 802 if (RD->hasDefinition()) { 803 CodeGenFunction::SanitizerScope SanScope(&CGF); 804 805 llvm::Constant *StaticData[] = { 806 llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall), 807 CheckSourceLocation, 808 CheckTypeDesc, 809 }; 810 811 llvm::Value *Bit = Builder.getFalse(); 812 for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) { 813 llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType( 814 getContext().getMemberPointerType( 815 MPT->getPointeeType(), 816 getContext().getRecordType(Base).getTypePtr())); 817 llvm::Value *TypeId = 818 llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD); 819 820 llvm::Value *TypeTest = 821 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test), 822 {NonVirtualFn, TypeId}); 823 Bit = Builder.CreateOr(Bit, TypeTest); 824 } 825 826 CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::SO_CFIMFCall), 827 SanitizerHandler::CFICheckFail, StaticData, 828 {NonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)}); 829 830 FnNonVirtual = Builder.GetInsertBlock(); 831 } 832 } 833 834 // We're done. 835 CGF.EmitBlock(FnEnd); 836 llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2); 837 CalleePtr->addIncoming(VirtualFn, FnVirtual); 838 CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual); 839 840 CGPointerAuthInfo PointerAuth; 841 842 if (const auto &Schema = 843 CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) { 844 llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(CGF.IntPtrTy, 2); 845 DiscriminatorPHI->addIncoming(llvm::ConstantInt::get(CGF.IntPtrTy, 0), 846 FnVirtual); 847 const auto &AuthInfo = 848 CGM.getMemberFunctionPointerAuthInfo(QualType(MPT, 0)); 849 assert(Schema.getKey() == AuthInfo.getKey() && 850 "Keys for virtual and non-virtual member functions must match"); 851 auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator(); 852 DiscriminatorPHI->addIncoming(NonVirtualDiscriminator, FnNonVirtual); 853 PointerAuth = CGPointerAuthInfo( 854 Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(), 855 Schema.authenticatesNullValues(), DiscriminatorPHI); 856 } 857 858 CGCallee Callee(FPT, CalleePtr, PointerAuth); 859 return Callee; 860 } 861 862 /// Compute an l-value by applying the given pointer-to-member to a 863 /// base object. 864 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( 865 CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr, 866 const MemberPointerType *MPT) { 867 assert(MemPtr->getType() == CGM.PtrDiffTy); 868 869 CGBuilderTy &Builder = CGF.Builder; 870 871 // Apply the offset, which we assume is non-null. 872 return Builder.CreateInBoundsGEP(CGF.Int8Ty, Base.emitRawPointer(CGF), MemPtr, 873 "memptr.offset"); 874 } 875 876 // See if it's possible to return a constant signed pointer. 
877 static llvm::Constant *pointerAuthResignConstant( 878 llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo, 879 const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) { 880 const auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Ptr); 881 882 if (!CPA) 883 return nullptr; 884 885 assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() && 886 CPA->getAddrDiscriminator()->isZeroValue() && 887 CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() && 888 "unexpected key or discriminators"); 889 890 return CGM.getConstantSignedPointer( 891 CPA->getPointer(), NewAuthInfo.getKey(), nullptr, 892 cast<llvm::ConstantInt>(NewAuthInfo.getDiscriminator())); 893 } 894 895 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer 896 /// conversion. 897 /// 898 /// Bitcast conversions are always a no-op under Itanium. 899 /// 900 /// Obligatory offset/adjustment diagram: 901 /// <-- offset --> <-- adjustment --> 902 /// |--------------------------|----------------------|--------------------| 903 /// ^Derived address point ^Base address point ^Member address point 904 /// 905 /// So when converting a base member pointer to a derived member pointer, 906 /// we add the offset to the adjustment because the address point has 907 /// decreased; and conversely, when converting a derived MP to a base MP 908 /// we subtract the offset from the adjustment because the address point 909 /// has increased. 910 /// 911 /// The standard forbids (at compile time) conversion to and from 912 /// virtual bases, which is why we don't have to consider them here. 913 /// 914 /// The standard forbids (at run time) casting a derived MP to a base 915 /// MP when the derived MP does not point to a member of the base. 916 /// This is why -1 is a reasonable choice for null data member 917 /// pointers. 918 llvm::Value * 919 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, 920 const CastExpr *E, 921 llvm::Value *src) { 922 // Use constant emission if we can. 923 if (isa<llvm::Constant>(src)) 924 return EmitMemberPointerConversion(E, cast<llvm::Constant>(src)); 925 926 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || 927 E->getCastKind() == CK_BaseToDerivedMemberPointer || 928 E->getCastKind() == CK_ReinterpretMemberPointer); 929 930 CGBuilderTy &Builder = CGF.Builder; 931 QualType DstType = E->getType(); 932 933 if (DstType->isMemberFunctionPointerType()) { 934 if (const auto &NewAuthInfo = 935 CGM.getMemberFunctionPointerAuthInfo(DstType)) { 936 QualType SrcType = E->getSubExpr()->getType(); 937 assert(SrcType->isMemberFunctionPointerType()); 938 const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType); 939 llvm::Value *MemFnPtr = Builder.CreateExtractValue(src, 0, "memptr.ptr"); 940 llvm::Type *OrigTy = MemFnPtr->getType(); 941 942 llvm::BasicBlock *StartBB = Builder.GetInsertBlock(); 943 llvm::BasicBlock *ResignBB = CGF.createBasicBlock("resign"); 944 llvm::BasicBlock *MergeBB = CGF.createBasicBlock("merge"); 945 946 // Check whether we have a virtual offset or a pointer to a function. 
947 assert(UseARMMethodPtrABI && "ARM ABI expected"); 948 llvm::Value *Adj = Builder.CreateExtractValue(src, 1, "memptr.adj"); 949 llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); 950 llvm::Value *AndVal = Builder.CreateAnd(Adj, Ptrdiff_1); 951 llvm::Value *IsVirtualOffset = 952 Builder.CreateIsNotNull(AndVal, "is.virtual.offset"); 953 Builder.CreateCondBr(IsVirtualOffset, MergeBB, ResignBB); 954 955 CGF.EmitBlock(ResignBB); 956 llvm::Type *PtrTy = llvm::PointerType::getUnqual(CGM.Int8Ty); 957 MemFnPtr = Builder.CreateIntToPtr(MemFnPtr, PtrTy); 958 MemFnPtr = 959 CGF.emitPointerAuthResign(MemFnPtr, SrcType, CurAuthInfo, NewAuthInfo, 960 isa<llvm::Constant>(src)); 961 MemFnPtr = Builder.CreatePtrToInt(MemFnPtr, OrigTy); 962 llvm::Value *ResignedVal = Builder.CreateInsertValue(src, MemFnPtr, 0); 963 ResignBB = Builder.GetInsertBlock(); 964 965 CGF.EmitBlock(MergeBB); 966 llvm::PHINode *NewSrc = Builder.CreatePHI(src->getType(), 2); 967 NewSrc->addIncoming(src, StartBB); 968 NewSrc->addIncoming(ResignedVal, ResignBB); 969 src = NewSrc; 970 } 971 } 972 973 // Under Itanium, reinterprets don't require any additional processing. 974 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; 975 976 llvm::Constant *adj = getMemberPointerAdjustment(E); 977 if (!adj) return src; 978 979 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); 980 981 const MemberPointerType *destTy = 982 E->getType()->castAs<MemberPointerType>(); 983 984 // For member data pointers, this is just a matter of adding the 985 // offset if the source is non-null. 986 if (destTy->isMemberDataPointer()) { 987 llvm::Value *dst; 988 if (isDerivedToBase) 989 dst = Builder.CreateNSWSub(src, adj, "adj"); 990 else 991 dst = Builder.CreateNSWAdd(src, adj, "adj"); 992 993 // Null check. 994 llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType()); 995 llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull"); 996 return Builder.CreateSelect(isNull, src, dst); 997 } 998 999 // The this-adjustment is left-shifted by 1 on ARM. 1000 if (UseARMMethodPtrABI) { 1001 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 1002 offset <<= 1; 1003 adj = llvm::ConstantInt::get(adj->getType(), offset); 1004 } 1005 1006 llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj"); 1007 llvm::Value *dstAdj; 1008 if (isDerivedToBase) 1009 dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj"); 1010 else 1011 dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj"); 1012 1013 return Builder.CreateInsertValue(src, dstAdj, 1); 1014 } 1015 1016 static llvm::Constant * 1017 pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType, 1018 QualType SrcType, CodeGenModule &CGM) { 1019 assert(DestType->isMemberFunctionPointerType() && 1020 SrcType->isMemberFunctionPointerType() && 1021 "member function pointers expected"); 1022 if (DestType == SrcType) 1023 return Src; 1024 1025 const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(DestType); 1026 const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType); 1027 1028 if (!NewAuthInfo && !CurAuthInfo) 1029 return Src; 1030 1031 llvm::Constant *MemFnPtr = Src->getAggregateElement(0u); 1032 if (MemFnPtr->getNumOperands() == 0) { 1033 // src must be a pair of null pointers. 
1034 assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected"); 1035 return Src; 1036 } 1037 1038 llvm::Constant *ConstPtr = pointerAuthResignConstant( 1039 cast<llvm::User>(MemFnPtr)->getOperand(0), CurAuthInfo, NewAuthInfo, CGM); 1040 ConstPtr = llvm::ConstantExpr::getPtrToInt(ConstPtr, MemFnPtr->getType()); 1041 return ConstantFoldInsertValueInstruction(Src, ConstPtr, 0); 1042 } 1043 1044 llvm::Constant * 1045 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E, 1046 llvm::Constant *src) { 1047 assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || 1048 E->getCastKind() == CK_BaseToDerivedMemberPointer || 1049 E->getCastKind() == CK_ReinterpretMemberPointer); 1050 1051 QualType DstType = E->getType(); 1052 1053 if (DstType->isMemberFunctionPointerType()) 1054 src = pointerAuthResignMemberFunctionPointer( 1055 src, DstType, E->getSubExpr()->getType(), CGM); 1056 1057 // Under Itanium, reinterprets don't require any additional processing. 1058 if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; 1059 1060 // If the adjustment is trivial, we don't need to do anything. 1061 llvm::Constant *adj = getMemberPointerAdjustment(E); 1062 if (!adj) return src; 1063 1064 bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); 1065 1066 const MemberPointerType *destTy = 1067 E->getType()->castAs<MemberPointerType>(); 1068 1069 // For member data pointers, this is just a matter of adding the 1070 // offset if the source is non-null. 1071 if (destTy->isMemberDataPointer()) { 1072 // null maps to null. 1073 if (src->isAllOnesValue()) return src; 1074 1075 if (isDerivedToBase) 1076 return llvm::ConstantExpr::getNSWSub(src, adj); 1077 else 1078 return llvm::ConstantExpr::getNSWAdd(src, adj); 1079 } 1080 1081 // The this-adjustment is left-shifted by 1 on ARM. 1082 if (UseARMMethodPtrABI) { 1083 uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); 1084 offset <<= 1; 1085 adj = llvm::ConstantInt::get(adj->getType(), offset); 1086 } 1087 1088 llvm::Constant *srcAdj = src->getAggregateElement(1); 1089 llvm::Constant *dstAdj; 1090 if (isDerivedToBase) 1091 dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj); 1092 else 1093 dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj); 1094 1095 llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1); 1096 assert(res != nullptr && "Folding must succeed"); 1097 return res; 1098 } 1099 1100 llvm::Constant * 1101 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) { 1102 // Itanium C++ ABI 2.3: 1103 // A NULL pointer is represented as -1. 
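// For example, 'int A::*pd = nullptr;' lowers to the single ptrdiff_t
// constant -1 (0 cannot be used, since it is a valid field offset),
// while a null pointer to member function lowers to the pair { 0, 0 }.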
1104 if (MPT->isMemberDataPointer())
1105 return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
1106
1107 llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
1108 llvm::Constant *Values[2] = { Zero, Zero };
1109 return llvm::ConstantStruct::getAnon(Values);
1110 }
1111
1112 llvm::Constant *
1113 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
1114 CharUnits offset) {
1115 // Itanium C++ ABI 2.3:
1116 // A pointer to data member is an offset from the base address of
1117 // the class object containing it, represented as a ptrdiff_t
1118 return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
1119 }
1120
1121 llvm::Constant *
1122 ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
1123 return BuildMemberPointer(MD, CharUnits::Zero());
1124 }
1125
1126 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
1127 CharUnits ThisAdjustment) {
1128 assert(MD->isInstance() && "Member function must not be static!");
1129
1130 CodeGenTypes &Types = CGM.getTypes();
1131
1132 // Get the function pointer (or index if this is a virtual function).
1133 llvm::Constant *MemPtr[2];
1134 if (MD->isVirtual()) {
1135 uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1136 uint64_t VTableOffset;
1137 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1138 // Multiply by 4-byte relative offsets.
1139 VTableOffset = Index * 4;
1140 } else {
1141 const ASTContext &Context = getContext();
1142 CharUnits PointerWidth = Context.toCharUnitsFromBits(
1143 Context.getTargetInfo().getPointerWidth(LangAS::Default));
1144 VTableOffset = Index * PointerWidth.getQuantity();
1145 }
1146
1147 if (UseARMMethodPtrABI) {
1148 // ARM C++ ABI 3.2.1:
1149 // This ABI specifies that adj contains twice the this
1150 // adjustment, plus 1 if the member function is virtual. The
1151 // least significant bit of adj then makes exactly the same
1152 // discrimination as the least significant bit of ptr does for
1153 // Itanium.
1154
1155 // We cannot use the Itanium ABI's representation for virtual member
1156 // function pointers under pointer authentication because it would
1157 // require us to store both the virtual offset and the constant
1158 // discriminator in the pointer, which would be immediately vulnerable
1159 // to attack. Instead we introduce a thunk that does the virtual dispatch
1160 // and store it as if it were a non-virtual member function. This means
1161 // that virtual function pointers may not compare equal anymore, but
1162 // fortunately they aren't required to by the standard, and we do make
1163 // a best-effort attempt to re-use the thunk.
1164 //
1165 // To support interoperation with code in which pointer authentication
1166 // is disabled, dereferencing a member function pointer must still handle
1167 // the virtual case, but it can use a discriminator which should never
1168 // be valid.
1169 const auto &Schema =
1170 CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers;
1171 if (Schema)
1172 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(
1173 getSignedVirtualMemberFunctionPointer(MD), CGM.PtrDiffTy);
1174 else
1175 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1176 // Don't set the LSB of adj to 1 if pointer authentication for member
1177 // function pointers is enabled.
1178 MemPtr[1] = llvm::ConstantInt::get( 1179 CGM.PtrDiffTy, 2 * ThisAdjustment.getQuantity() + !Schema); 1180 } else { 1181 // Itanium C++ ABI 2.3: 1182 // For a virtual function, [the pointer field] is 1 plus the 1183 // virtual table offset (in bytes) of the function, 1184 // represented as a ptrdiff_t. 1185 MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1); 1186 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 1187 ThisAdjustment.getQuantity()); 1188 } 1189 } else { 1190 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); 1191 llvm::Type *Ty; 1192 // Check whether the function has a computable LLVM signature. 1193 if (Types.isFuncTypeConvertible(FPT)) { 1194 // The function has a computable LLVM signature; use the correct type. 1195 Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD)); 1196 } else { 1197 // Use an arbitrary non-function type to tell GetAddrOfFunction that the 1198 // function type is incomplete. 1199 Ty = CGM.PtrDiffTy; 1200 } 1201 llvm::Constant *addr = CGM.getMemberFunctionPointer(MD, Ty); 1202 1203 MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy); 1204 MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, 1205 (UseARMMethodPtrABI ? 2 : 1) * 1206 ThisAdjustment.getQuantity()); 1207 } 1208 1209 return llvm::ConstantStruct::getAnon(MemPtr); 1210 } 1211 1212 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP, 1213 QualType MPType) { 1214 const MemberPointerType *MPT = MPType->castAs<MemberPointerType>(); 1215 const ValueDecl *MPD = MP.getMemberPointerDecl(); 1216 if (!MPD) 1217 return EmitNullMemberPointer(MPT); 1218 1219 CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP); 1220 1221 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) { 1222 llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment); 1223 QualType SrcType = getContext().getMemberPointerType( 1224 MD->getType(), MD->getParent()->getTypeForDecl()); 1225 return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM); 1226 } 1227 1228 CharUnits FieldOffset = 1229 getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD)); 1230 return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset); 1231 } 1232 1233 /// The comparison algorithm is pretty easy: the member pointers are 1234 /// the same if they're either bitwise identical *or* both null. 1235 /// 1236 /// ARM is different here only because null-ness is more complicated. 1237 llvm::Value * 1238 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF, 1239 llvm::Value *L, 1240 llvm::Value *R, 1241 const MemberPointerType *MPT, 1242 bool Inequality) { 1243 CGBuilderTy &Builder = CGF.Builder; 1244 1245 llvm::ICmpInst::Predicate Eq; 1246 llvm::Instruction::BinaryOps And, Or; 1247 if (Inequality) { 1248 Eq = llvm::ICmpInst::ICMP_NE; 1249 And = llvm::Instruction::Or; 1250 Or = llvm::Instruction::And; 1251 } else { 1252 Eq = llvm::ICmpInst::ICMP_EQ; 1253 And = llvm::Instruction::And; 1254 Or = llvm::Instruction::Or; 1255 } 1256 1257 // Member data pointers are easy because there's a unique null 1258 // value, so it just comes down to bitwise equality. 1259 if (MPT->isMemberDataPointer()) 1260 return Builder.CreateICmp(Eq, L, R); 1261 1262 // For member function pointers, the tautologies are more complex. 
1263 // The Itanium tautology is: 1264 // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj)) 1265 // The ARM tautology is: 1266 // (L == R) <==> (L.ptr == R.ptr && 1267 // (L.adj == R.adj || 1268 // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0))) 1269 // The inequality tautologies have exactly the same structure, except 1270 // applying De Morgan's laws. 1271 1272 llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr"); 1273 llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr"); 1274 1275 // This condition tests whether L.ptr == R.ptr. This must always be 1276 // true for equality to hold. 1277 llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr"); 1278 1279 // This condition, together with the assumption that L.ptr == R.ptr, 1280 // tests whether the pointers are both null. ARM imposes an extra 1281 // condition. 1282 llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType()); 1283 llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null"); 1284 1285 // This condition tests whether L.adj == R.adj. If this isn't 1286 // true, the pointers are unequal unless they're both null. 1287 llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj"); 1288 llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj"); 1289 llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj"); 1290 1291 // Null member function pointers on ARM clear the low bit of Adj, 1292 // so the zero condition has to check that neither low bit is set. 1293 if (UseARMMethodPtrABI) { 1294 llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1); 1295 1296 // Compute (l.adj | r.adj) & 1 and test it against zero. 1297 llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj"); 1298 llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One); 1299 llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero, 1300 "cmp.or.adj"); 1301 EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero); 1302 } 1303 1304 // Tie together all our conditions. 1305 llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq); 1306 Result = Builder.CreateBinOp(And, PtrEq, Result, 1307 Inequality ? "memptr.ne" : "memptr.eq"); 1308 return Result; 1309 } 1310 1311 llvm::Value * 1312 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF, 1313 llvm::Value *MemPtr, 1314 const MemberPointerType *MPT) { 1315 CGBuilderTy &Builder = CGF.Builder; 1316 1317 /// For member data pointers, this is just a check against -1. 1318 if (MPT->isMemberDataPointer()) { 1319 assert(MemPtr->getType() == CGM.PtrDiffTy); 1320 llvm::Value *NegativeOne = 1321 llvm::Constant::getAllOnesValue(MemPtr->getType()); 1322 return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool"); 1323 } 1324 1325 // In Itanium, a member function pointer is not null if 'ptr' is not null. 1326 llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr"); 1327 1328 llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0); 1329 llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool"); 1330 1331 // On ARM, a member function pointer is also non-null if the low bit of 'adj' 1332 // (the virtual bit) is set. 
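// Without this extra test, an ARM member pointer to a virtual function
// in the first vtable slot, encoded as { ptr = 0, adj = 1 }, would
// incorrectly evaluate as null because its 'ptr' field is zero.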
1333 if (UseARMMethodPtrABI) { 1334 llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1); 1335 llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj"); 1336 llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit"); 1337 llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero, 1338 "memptr.isvirtual"); 1339 Result = Builder.CreateOr(Result, IsVirtual); 1340 } 1341 1342 return Result; 1343 } 1344 1345 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const { 1346 const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl(); 1347 if (!RD) 1348 return false; 1349 1350 // If C++ prohibits us from making a copy, return by address. 1351 if (!RD->canPassInRegisters()) { 1352 auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType()); 1353 FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false); 1354 return true; 1355 } 1356 return false; 1357 } 1358 1359 /// The Itanium ABI requires non-zero initialization only for data 1360 /// member pointers, for which '0' is a valid offset. 1361 bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) { 1362 return MPT->isMemberFunctionPointer(); 1363 } 1364 1365 /// The Itanium ABI always places an offset to the complete object 1366 /// at entry -2 in the vtable. 1367 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF, 1368 const CXXDeleteExpr *DE, 1369 Address Ptr, 1370 QualType ElementType, 1371 const CXXDestructorDecl *Dtor) { 1372 bool UseGlobalDelete = DE->isGlobalDelete(); 1373 if (UseGlobalDelete) { 1374 // Derive the complete-object pointer, which is what we need 1375 // to pass to the deallocation function. 1376 1377 // Grab the vtable pointer as an intptr_t*. 1378 auto *ClassDecl = 1379 cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl()); 1380 llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl); 1381 1382 // Track back to entry -2 and pull out the offset there. 1383 llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64( 1384 CGF.IntPtrTy, VTable, -2, "complete-offset.ptr"); 1385 llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, 1386 CGF.getPointerAlign()); 1387 1388 // Apply the offset. 1389 llvm::Value *CompletePtr = Ptr.emitRawPointer(CGF); 1390 CompletePtr = 1391 CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset); 1392 1393 // If we're supposed to call the global delete, make sure we do so 1394 // even if the destructor throws. 1395 CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr, 1396 ElementType); 1397 } 1398 1399 // FIXME: Provide a source location here even though there's no 1400 // CXXMemberCallExpr for dtor call. 1401 CXXDtorType DtorType = UseGlobalDelete ? 
Dtor_Complete : Dtor_Deleting; 1402 EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE, 1403 /*CallOrInvoke=*/nullptr); 1404 1405 if (UseGlobalDelete) 1406 CGF.PopCleanupBlock(); 1407 } 1408 1409 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) { 1410 // void __cxa_rethrow(); 1411 1412 llvm::FunctionType *FTy = 1413 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 1414 1415 llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow"); 1416 1417 if (isNoReturn) 1418 CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, {}); 1419 else 1420 CGF.EmitRuntimeCallOrInvoke(Fn); 1421 } 1422 1423 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) { 1424 // void *__cxa_allocate_exception(size_t thrown_size); 1425 1426 llvm::FunctionType *FTy = 1427 llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false); 1428 1429 return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception"); 1430 } 1431 1432 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) { 1433 // void __cxa_throw(void *thrown_exception, std::type_info *tinfo, 1434 // void (*dest) (void *)); 1435 1436 llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.GlobalsInt8PtrTy, CGM.Int8PtrTy }; 1437 llvm::FunctionType *FTy = 1438 llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false); 1439 1440 return CGM.CreateRuntimeFunction(FTy, "__cxa_throw"); 1441 } 1442 1443 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) { 1444 QualType ThrowType = E->getSubExpr()->getType(); 1445 // Now allocate the exception object. 1446 llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType()); 1447 uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity(); 1448 1449 llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM); 1450 llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall( 1451 AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception"); 1452 1453 CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment(); 1454 CGF.EmitAnyExprToExn( 1455 E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign)); 1456 1457 // Now throw the exception. 1458 llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType, 1459 /*ForEH=*/true); 1460 1461 // The address of the destructor. If the exception type has a 1462 // trivial destructor (or isn't a record), we just pass null. 1463 llvm::Constant *Dtor = nullptr; 1464 if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) { 1465 CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl()); 1466 if (!Record->hasTrivialDestructor()) { 1467 // __cxa_throw is declared to take its destructor as void (*)(void *). We 1468 // must match that if function pointers can be authenticated with a 1469 // discriminator based on their type. 
static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      global_as const abi::__class_type_info *src,
  //                      global_as const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *GlobInt8PtrTy = CGF.GlobalsInt8PtrTy;
  llvm::Type *PtrDiffTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, GlobInt8PtrTy, GlobInt8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind willreturn readonly.
  llvm::AttrBuilder FuncAttrs(CGF.getLLVMContext());
  FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
  FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
  llvm::AttributeList Attrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst. Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (const CXXBasePath &Path : Paths) {
    if (Path.Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (const CXXBasePathElement &PathElement : Path) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (PathElement.Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
      const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
      Offset += L.getBaseClassOffset(
          PathElement.Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}
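// Hedged examples of the resulting hints (classes are hypothetical and for
// exposition only; concrete offsets depend on the target layout):
//
//   struct A { virtual ~A(); };
//   struct B { virtual ~B(); };
//   struct C : A, B {};        // Src=B, Dst=C: hint = offset of B within C
//   struct V : virtual A {};   // Src=A, Dst=V: hint = -1 (virtual base)
//   struct P : private A {};   // Src=A, Dst=P: hint = -2 (not a public base)
//   struct M : A, C {};        // Src=A, Dst=M: hint = -3 (two public copies)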
static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

bool ItaniumCXXABI::shouldTypeidBeNullChecked(QualType SrcRecordTy) {
  return true;
}

void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
                                        ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});
  } else {
    // Load the type info.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}

bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}
llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.emitRawPointer(CGF);
  if (CGM.getCodeGenOpts().PointerAuth.CXXVTablePointers) {
    // We perform a no-op load of the vtable pointer here to force an
    // authentication. In environments that do not support pointer
    // authentication this is an actual no-op that will be elided. When
    // pointer authentication is supported and enforced on vtable pointers,
    // this load can trap.
    llvm::Value *Vtable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int8PtrTy, SrcDecl,
                         CodeGenFunction::VTableAuthMode::MustTrap);
    assert(Vtable);
    (void)Vtable;
  }

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
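// Shape of the emitted runtime call (a sketch under the declarations above,
// not verbatim output): for `dynamic_cast<Dst*>(src)` this is roughly
//
//   %r = call ptr @__dynamic_cast(ptr %src, ptr @_ZTI3Src, ptr @_ZTI3Dst,
//                                 i64 <hint>)
//
// with a null-check-and-throw appended only for casts to reference types.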
llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
    llvm::BasicBlock *CastFail) {
  ASTContext &Context = getContext();

  // Find all the inheritance paths.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);
  (void)DestDecl->isDerivedFrom(SrcDecl, Paths);

  // Find an offset within `DestDecl` where a `SrcDecl` instance and its vptr
  // might appear.
  std::optional<CharUnits> Offset;
  for (const CXXBasePath &Path : Paths) {
    // dynamic_cast only finds public inheritance paths.
    if (Path.Access != AS_public)
      continue;

    CharUnits PathOffset;
    for (const CXXBasePathElement &PathElement : Path) {
      // Find the offset along this inheritance step.
      const CXXRecordDecl *Base =
          PathElement.Base->getType()->getAsCXXRecordDecl();
      if (PathElement.Base->isVirtual()) {
        // For a virtual base class, we know that the derived class is exactly
        // DestDecl, so we can use the vbase offset from its layout.
        const ASTRecordLayout &L = Context.getASTRecordLayout(DestDecl);
        PathOffset = L.getVBaseClassOffset(Base);
      } else {
        const ASTRecordLayout &L =
            Context.getASTRecordLayout(PathElement.Class);
        PathOffset += L.getBaseClassOffset(Base);
      }
    }

    if (!Offset)
      Offset = PathOffset;
    else if (Offset != PathOffset) {
      // Base appears in at least two different places. Find the most-derived
      // object and see if it's a DestDecl. Note that the most-derived object
      // must be at least as aligned as this base class subobject, and must
      // have a vptr at offset 0.
      ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
                         CGF.VoidPtrTy, ThisAddr.getAlignment());
      SrcDecl = DestDecl;
      Offset = CharUnits::Zero();
      break;
    }
  }

  if (!Offset) {
    // If there are no public inheritance paths, the cast always fails.
    CGF.EmitBranch(CastFail);
    return llvm::PoisonValue::get(CGF.VoidPtrTy);
  }

  // Compare the vptr against the expected vptr for the destination type at
  // this offset. Note that we do not know what type ThisAddr points to when
  // the derived class multiply inherits from the base class, so we cannot use
  // GetVTablePtr; instead we load the vptr directly.
  llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
      ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
  CGM.DecorateInstructionWithTBAA(
      VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
  llvm::Value *Success = CGF.Builder.CreateICmpEQ(
      VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
  llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
  if (!Offset->isZero())
    Result = CGF.Builder.CreateInBoundsGEP(
        CGF.CharTy, Result,
        {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
  CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
  return Result;
}

llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy) {
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);

    // Get the offset-to-top from the vtable.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  return CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ThisAddr.emitRawPointer(CGF),
                                       OffsetToTop);
}

bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::FunctionCallee Fn = getBadCastFn(CGF);
  llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
  Call->setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}

llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
      CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
          "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
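// Note on the vbase offset read above (hedged illustration, hypothetical
// classes): for `struct D : virtual B {}`, the vtable for D stores, at a
// fixed negative offset from the address point, the displacement from a D
// subobject to its B virtual base. GetVirtualBaseClassOffset loads that slot
// so derived-to-virtual-base conversions work without knowing the most
// derived type.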
void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}

CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type global void **).
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                             : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
    LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
    QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(CanQualType::CreateUnsafe(Q)));
    return AddedStructorArgCounts::prefix(1);
  }
  return AddedStructorArgCounts{};
}

void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // calls the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}

void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
    QualType Q = Context.getAddrSpaceQualType(Context.VoidPtrTy, AS);
    QualType T = Context.getPointerType(Q);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamKind::CXXVTT);
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}

void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
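// For reference: the structor variants emitted above are distinguished in the
// Itanium mangling. For a class Foo, the symbols are _ZN3FooC1Ev (complete
// ctor), _ZN3FooC2Ev (base ctor), _ZN3FooD1Ev (complete dtor), _ZN3FooD2Ev
// (base dtor), and _ZN3FooD0Ev (deleting dtor).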
CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return AddedStructorArgs{};

  // Insert the implicit 'vtt' argument as the second argument. Make sure to
  // correctly reflect its address space, which can differ from generic on
  // some targets.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  QualType Q = getContext().getAddrSpaceQualType(getContext().VoidPtrTy, AS);
  QualType VTTTy = getContext().getPointerType(Q);
  return AddedStructorArgs::prefix({{VTT, VTTTy}});
}

llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
    CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
    bool ForVirtualBase, bool Delegating) {
  GlobalDecl GD(DD, Type);
  return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
}

void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, CGF.getAsNaturalPointerTo(This, ThisTy),
                            ThisTy, VTT, VTTTy, nullptr);
}
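// Context for the VTT plumbing above (illustrative, hypothetical classes): a
// base-object structor only receives a VTT when its class has virtual bases,
// e.g.
//
//   struct V {};
//   struct A : virtual V {};
//   struct B : A {};   // B's ctor passes a VTT slice to A's base ctor
//
// The VTT is a table of vtable address points that tells the base-object
// constructor which (construction) vtables to install while B is being built.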
// Check if any non-inline method has the specified attribute.
template <typename T>
static bool CXXRecordNonInlineHasAttr(const CXXRecordDecl *RD) {
  for (const auto *D : RD->noload_decls()) {
    if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
      if (FD->isInlined() || FD->doesThisDeclarationHaveABody() ||
          FD->isPureVirtual())
        continue;
      if (D->hasAttr<T>())
        return true;
    }
  }

  return false;
}

static void setVTableSelectiveDLLImportExport(CodeGenModule &CGM,
                                              llvm::GlobalVariable *VTable,
                                              const CXXRecordDecl *RD) {
  if (VTable->getDLLStorageClass() !=
          llvm::GlobalVariable::DefaultStorageClass ||
      RD->hasAttr<DLLImportAttr>() || RD->hasAttr<DLLExportAttr>())
    return;

  if (CGM.getVTables().isVTableExternal(RD)) {
    if (CXXRecordNonInlineHasAttr<DLLImportAttr>(RD))
      VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
  } else if (CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
}

void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  if (VTContext.isRelativeLayout()) {
    CGVT.RemoveHwasanMetadata(VTable);
    if (!VTable->isDSOLocal())
      CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
  }
}
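// Rough shape of the initializer built above for a simple dynamic class (a
// sketch; real layouts add entries for virtual bases, thunks, etc.):
//
//   _ZTV3Foo = { i64 0,            ; offset-to-top
//                ptr @_ZTI3Foo,    ; RTTI descriptor
//                ptr @_ZN3Foo1fEv, ; virtual functions...
//                ... }
//
// The "address point" used for vptr stores is the slot just past the RTTI
// entry, not the start of the variable.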
bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
    CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
  if (Vptr.NearestVBase == nullptr)
    return false;
  return NeedsVTTParameter(CGF.CurGD);
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {

  if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
      NeedsVTTParameter(CGF.CurGD)) {
    return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
                                                  NearestVBase);
  }
  return getVTableAddressPoint(Base, VTableClass);
}

llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  const VTableLayout &Layout =
      CGM.getItaniumVTableContext().getVTableLayout(VTableClass);
  VTableLayout::AddressPointLocation AddressPoint =
      Layout.getAddressPoint(Base);
  llvm::Value *Indices[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, 0),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
      llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // Add inrange attribute to indicate that only the VTableIndex can be
  // accessed.
  unsigned ComponentSize =
      CGM.getDataLayout().getTypeAllocSize(CGM.getVTableComponentType());
  unsigned VTableSize =
      ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
  unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
  llvm::ConstantRange InRange(
      llvm::APInt(32, (int)-Offset, true),
      llvm::APInt(32, (int)(VTableSize - Offset), true));
  return llvm::ConstantExpr::getGetElementPtr(
      VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.GlobalsVoidPtrTy, VTT,
                                                 VirtualPointerIndex);

  // And load the address point from the VTT.
  llvm::Value *AP =
      CGF.Builder.CreateAlignedLoad(CGF.GlobalsVoidPtrTy, VTT,
                                    CGF.getPointerAlign());

  if (auto &Schema = CGF.CGM.getCodeGenOpts().PointerAuth.CXXVTTVTablePointers) {
    CGPointerAuthInfo PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTT,
                                                            GlobalDecl(),
                                                            QualType());
    AP = CGF.EmitPointerAuthAuth(PointerAuth, AP);
  }

  return AP;
}
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer to global alignment for the vtable. Otherwise we would align
  // them based on the size of the initializer which doesn't make sense as only
  // single values are read.
  LangAS AS = CGM.GetGlobalVarAddressSpace(nullptr);
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(AS);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getAsAlign());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  if (CGM.getTarget().hasPS4DLLImportExport())
    setVTableSelectiveDLLImportExport(CGM, VTable, RD);

  CGM.setGVProperties(VTable, RD);
  return VTable;
}

CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  llvm::Type *PtrTy = CGM.GlobalsInt8PtrTy;
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(This, PtrTy, MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc, *VTableSlotPtr = nullptr;
  auto &Schema = CGM.getCodeGenOpts().PointerAuth.CXXVirtualFunctionPointers;
  if (!Schema && CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, PtrTy,
        VTableIndex *
            CGM.getContext().getTargetInfo().getPointerWidth(LangAS::Default) /
            8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      VFuncLoad = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
    } else {
      VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          PtrTy, VTable, VTableIndex, "vfn");
      VFuncLoad = CGF.Builder.CreateAlignedLoad(PtrTy, VTableSlotPtr,
                                                CGF.getPointerAlign());
    }

    // Add !invariant.load md to the virtual function load to indicate that
    // the function didn't change inside the vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization, because that only matters when there are two
    // loads of the same virtual function from the same vtable load, which
    // won't happen without devirtualization enabled by
    // -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGPointerAuthInfo PointerAuth;
  if (Schema) {
    assert(VTableSlotPtr && "virtual function pointer not set");
    GD = CGM.getItaniumVTableContext().findOriginalMethod(GD.getCanonicalDecl());
    PointerAuth = CGF.EmitPointerAuthInfo(Schema, VTableSlotPtr, GD, QualType());
  }
  CGCallee Callee(GD, VFunc, PointerAuth);
  return Callee;
}
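// A hedged sketch of the classic (non-relative, no pointer-auth) lowering
// produced above for a call `p->f()`:
//
//   %vtable = load ptr, ptr %p          ; vptr at offset 0
//   %vfn    = getelementptr ptr, ptr %vtable, i64 <index of f>
//   %f      = load ptr, ptr %vfn
//   call ... %f(ptr %p, ...)
//
// With relative layout the slot instead holds a 32-bit offset and is read via
// the llvm.load.relative intrinsic.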
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E, llvm::CallBase **CallOrInvoke) {
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  CGF.EmitCXXDestructorCall(GD, Callee, This.emitRawPointer(CGF), ThisTy,
                            nullptr, QualType(), nullptr, CallOrInvoke);
  return nullptr;
}

void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
  CodeGenVTables &VTables = CGM.getVTables();
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}

bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
    const CXXRecordDecl *RD) const {
  // We don't emit available_externally vtables if we are in -fapple-kext mode
  // because kext mode does not permit devirtualization.
  if (CGM.getLangOpts().AppleKext)
    return false;

  // If the vtable is hidden then it is not safe to emit an available_externally
  // copy of vtable.
  if (isVTableHidden(RD))
    return false;

  if (CGM.getCodeGenOpts().ForceEmitVTables)
    return true;

  // A speculative vtable can only be generated if all virtual inline functions
  // defined by this class are emitted. The vtable in the final program
  // contains, for each virtual inline function not used in the current TU, a
  // function that is equivalent to the unused function. The function in the
  // actual vtable does not have to be declared under the same symbol (e.g., a
  // virtual destructor that can be substituted with its base class's
  // destructor). Since inline functions are emitted lazily, and this emission
  // does not account for speculative emission of a vtable, we might generate a
  // speculative vtable with references to inline functions that are not
  // emitted under that name. This can lead to problems when devirtualizing a
  // call to such a function, which would result in linking errors. Hence, if
  // there are any unused virtual inline functions, we cannot emit the
  // speculative vtable.
  // FIXME we can still emit a copy of the vtable if we
  // can emit definitions of the inline functions.
  if (hasAnyUnusedVirtualInlineFunction(RD))
    return false;

  // For a class with virtual bases, we must also be able to speculatively
  // emit the VTT, because CodeGen doesn't have separate notions of "can emit
  // the vtable" and "can emit the VTT". For a base subobject, this means we
  // need to be able to emit non-virtual base vtables.
  if (RD->getNumVBases()) {
    for (const auto &B : RD->bases()) {
      auto *BRD = B.getType()->getAsCXXRecordDecl();
      assert(BRD && "no class for base specifier");
      if (B.isVirtual() || !BRD->isDynamicClass())
        continue;
      if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
        return false;
    }
  }

  return true;
}
bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
  if (!canSpeculativelyEmitVTableAsBaseClass(RD))
    return false;

  if (RD->shouldEmitInExternalSource())
    return false;

  // For a complete-object vtable (or more specifically, for the VTT), we need
  // to be able to speculatively emit the vtables of all dynamic virtual bases.
  for (const auto &B : RD->vbases()) {
    auto *BRD = B.getType()->getAsCXXRecordDecl();
    assert(BRD && "no class for base specifier");
    if (!BRD->isDynamicClass())
      continue;
    if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
      return false;
  }

  return true;
}

static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          const CXXRecordDecl *UnadjustedClass,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.emitRawPointer(CGF);

  Address V = InitialPtr.withElementType(CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(
        V, CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    llvm::Value *VTablePtr =
        CGF.GetVTablePtr(V, CGF.Int8PtrTy, UnadjustedClass);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                        CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getElementType(),
                                              V.emitRawPointer(CGF), Offset);
  } else {
    ResultPtr = V.emitRawPointer(CGF);
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  return ResultPtr;
}
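// Worked illustration of a non-virtual `this` adjustment (hypothetical
// classes; offsets depend on the target layout):
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { void g() override; };
//
// The C::g entry in B's secondary vtable is a thunk whose prologue adjusts
// `this` by the negated offset of the B subobject within C (commonly -8 on
// 64-bit layouts) before tail-calling C::g. Virtual bases instead route
// through a vcall offset loaded from the vtable, as in the code above.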
llvm::Value *
ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF, Address This,
                                     const CXXRecordDecl *UnadjustedClass,
                                     const ThunkInfo &TI) {
  return performTypeAdjustment(CGF, This, UnadjustedClass, TI.This.NonVirtual,
                               TI.This.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}

llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
                                       const CXXRecordDecl *UnadjustedClass,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, UnadjustedClass, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T = CGF.ReturnValue.getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getPreferredTypeAlignInChars(elementType));
}

Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr = CookiePtr.withElementType(CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    SI->setNoSanitizeMetadata();
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.emitRawPointer(CGF));
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
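// Cookie layout illustration (hedged; exact padding depends on the target):
// for `new double[n]` with an 8-byte size_t and 8-byte element alignment,
// the allocation looks like
//
//   [ size_t n ][ double[0] double[1] ... ]
//   ^ NewPtr    ^ returned pointer (NewPtr + cookie size)
//
// If the element type were over-aligned (say, alignas(16)), the cookie would
// pad to 16 bytes and `n` would be stored right-justified in that space.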
llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                Address allocPtr,
                                                CharUnits cookieSize) {
  // The number of elements is right-justified in the cookie.
  Address numElementsPtr = allocPtr;
  CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
  if (!numElementsOffset.isZero())
    numElementsPtr =
        CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr,
                                               numElementsOffset);

  unsigned AS = allocPtr.getAddressSpace();
  numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
  llvm::FunctionCallee F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = cookie.withElementType(CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            Address allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
  Address numElementsPtr
    = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());

  numElementsPtr = numElementsPtr.withElementType(CGF.SizeTy);
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                              GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_acquire",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
                                              llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_release",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
                                            llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(
      FTy, "__cxa_guard_abort",
      llvm::AttributeList::get(CGM.getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoUnwind));
}

namespace {
struct CallGuardAbort final : EHScopeStack::Cleanup {
  llvm::GlobalVariable *Guard;
  CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                Guard);
  }
};
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment =
          CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlign(guardTy));
    }
  }
  llvm::PointerType *guardPtrTy = llvm::PointerType::get(
      CGF.CGM.getLLVMContext(),
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage, visibility and dll storage class from the guarded
    // variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    guard->setDLLStorageClass(var->getDLLStorageClass());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }
  //
  // If threadsafe statics are enabled, but we don't have inline atomics, just
  // call __cxa_guard_acquire unconditionally. The "inline" check isn't
  // actually inline, and the user might not expect calls to __atomic libcalls.

  unsigned MaxInlineWidthInBits = CGF.getTarget().getMaxAtomicInlineWidth();
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
  if (!threadsafe || MaxInlineWidthInBits) {
    // Load the first byte of the guard variable.
    llvm::LoadInst *LI =
        Builder.CreateLoad(guardAddr.withElementType(CGM.Int8Ty));

    // Itanium ABI:
    //   An implementation supporting thread-safety on multiprocessor
    //   systems must also guarantee that references to the initialized
    //   object do not occur before the load of the initialization flag.
    //
    // In LLVM, we do this by marking the load Acquire.
    if (threadsafe)
      LI->setAtomic(llvm::AtomicOrdering::Acquire);

    // For ARM, we should only check the first bit, rather than the entire byte:
    //
    // ARM C++ ABI 3.2.3.1:
    //   To support the potential use of initialization guard variables
    //   as semaphores that are the target of ARM SWP and LDREX/STREX
    //   synchronizing instructions we define a static initialization
    //   guard variable to be a 4-byte aligned, 4-byte word with the
    //   following inline access protocol.
    //     #define INITIALIZED 1
    //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
    //       if (__cxa_guard_acquire(&obj_guard))
    //         ...
    //     }
    //
    // and similarly for ARM64:
    //
    // ARM64 C++ ABI 3.2.2:
    //   This ABI instead only specifies the value bit 0 of the static guard
    //   variable; all other bits are platform defined. Bit 0 shall be 0 when
    //   the variable is not initialized and 1 when it is.
    llvm::Value *V =
        (UseARMGuardVarABI && !useInt8GuardVariable)
            ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
            : LI;
    llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

    llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");

    // Check if the first byte of the guard variable is zero.
    CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                                 CodeGenFunction::GuardKind::VariableGuard, &D);

    CGF.EmitBlock(InitCheckBlock);
  }

  // The semantics of dynamic initialization of variables with static or thread
  // storage duration depends on whether they are declared at block-scope. The
  // initialization of such variables at block-scope can be aborted with an
  // exception and later retried (per C++20 [stmt.dcl]p4), and recursive entry
  // to their initialization has undefined behavior (also per C++20
  // [stmt.dcl]p4). For such variables declared at non-block scope, exceptions
  // lead to termination (per C++20 [except.terminate]p1), and recursive
  // references to the variables are governed only by the lifetime rules (per
  // C++20 [class.cdtor]p2), which means such references are perfectly fine as
  // long as they avoid touching memory. As a result, block-scope variables
  // must not be marked as initialized until after initialization completes
  // (unless the mark is reverted following an exception), but non-block-scope
  // variables must be marked prior to initialization so that recursive
  // accesses during initialization do not restart initialization.

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  } else if (!D.isLocalVarDecl()) {
    // For non-local variables, store 1 into the first byte of the guard
    // variable before the object initialization begins so that references
    // to the variable during initialization don't restart initialization.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        guardAddr.withElementType(CGM.Int8Ty));
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release. This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.emitRawPointer(CGF));
  } else if (D.isLocalVarDecl()) {
    // For local variables, store 1 into the first byte of the guard variable
    // after the object initialization completes so that initialization is
    // retried if initialization is interrupted by an exception.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        guardAddr.withElementType(CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}
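// Hedged sketch of the thread-safe shape emitted above for a local
// `static Widget w;` inside `void f()` (block names match the
// createBasicBlock calls; the guard symbol follows the _ZGV mangling):
//
//   %g = load atomic i8, ptr @_ZGVZ1fvE1w acquire
//   br i1 (%g == 0), label %init.check, label %init.end
// init.check:
//   %acquired = call i32 @__cxa_guard_acquire(ptr @_ZGVZ1fvE1w)
//   br i1 (%acquired != 0), label %init, label %init.end
// init:
//   ; construct w; __cxa_guard_abort runs on the unwind path
//   call void @__cxa_guard_release(ptr @_ZGVZ1fvE1w)
//   br label %init.end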
/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.
  llvm::Type *dtorTy = CGF.UnqualPtrTy;

  // Preserve address space of addr.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrPtrTy = AddrAS ? llvm::PointerType::get(CGF.getLLVMContext(), AddrAS)
                          : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
      llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  const auto &Context = CGF.CGM.getContext();
  FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/false));
  QualType fnType =
      Context.getFunctionType(Context.VoidTy, {Context.VoidPtrTy}, EPI);
  llvm::Constant *dtorCallee = cast<llvm::Constant>(dtor.getCallee());
  dtorCallee = CGF.CGM.getFunctionPointer(dtorCallee, fnType);

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here
    // is okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {dtorCallee, addr, handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
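// Usage sketch (assumed shape): registering the destructor of a global
// `Widget w;` emits, inside the module's initializer,
//
//   call i32 @__cxa_atexit(ptr @_ZN6WidgetD1Ev, ptr @w, ptr @__dso_handle)
//
// so the runtime can run ~Widget on `w` when this shared object is unloaded
// (or at exit), in reverse registration order.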
static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
                                                   StringRef FnName) {
  // Create a function that registers/unregisters destructors that have the
  // same priority.
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());

  return GlobalInitOrCleanupFn;
}

void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2852 llvm::Constant *handle = 2853 CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle"); 2854 auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts()); 2855 GV->setVisibility(llvm::GlobalValue::HiddenVisibility); 2856 2857 // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d); 2858 llvm::Type *paramTys[] = {dtorTy, AddrPtrTy, handle->getType()}; 2859 llvm::FunctionType *atexitTy = 2860 llvm::FunctionType::get(CGF.IntTy, paramTys, false); 2861 2862 // Fetch the actual function. 2863 llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name); 2864 if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee())) 2865 fn->setDoesNotThrow(); 2866 2867 const auto &Context = CGF.CGM.getContext(); 2868 FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention( 2869 /*IsVariadic=*/false, /*IsCXXMethod=*/false)); 2870 QualType fnType = 2871 Context.getFunctionType(Context.VoidTy, {Context.VoidPtrTy}, EPI); 2872 llvm::Constant *dtorCallee = cast<llvm::Constant>(dtor.getCallee()); 2873 dtorCallee = CGF.CGM.getFunctionPointer(dtorCallee, fnType); 2874 2875 if (!addr) 2876 // addr is null when we are trying to register a dtor annotated with 2877 // __attribute__((destructor)) in a constructor function. Using null here is 2878 // okay because this argument is just passed back to the destructor 2879 // function. 2880 addr = llvm::Constant::getNullValue(CGF.Int8PtrTy); 2881 2882 llvm::Value *args[] = {dtorCallee, addr, handle}; 2883 CGF.EmitNounwindRuntimeCall(atexit, args); 2884 } 2885 2886 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM, 2887 StringRef FnName) { 2888 // Create a function that registers/unregisters destructors that have the same 2889 // priority. 2890 llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false); 2891 llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction( 2892 FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation()); 2893 2894 return GlobalInitOrCleanupFn; 2895 } 2896 2897 void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() { 2898 for (const auto &I : DtorsUsingAtExit) { 2899 int Priority = I.first; 2900 std::string GlobalCleanupFnName = 2901 std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority); 2902 2903 llvm::Function *GlobalCleanupFn = 2904 createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName); 2905 2906 CodeGenFunction CGF(*this); 2907 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn, 2908 getTypes().arrangeNullaryFunction(), FunctionArgList(), 2909 SourceLocation(), SourceLocation()); 2910 auto AL = ApplyDebugLocation::CreateArtificial(CGF); 2911 2912 // Get the destructor function type, void(*)(void). 2913 llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false); 2914 2915 // Destructor functions are run/unregistered in non-ascending 2916 // order of their priorities. 2917 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second; 2918 auto itv = Dtors.rbegin(); 2919 while (itv != Dtors.rend()) { 2920 llvm::Function *Dtor = *itv; 2921 2922 // We're assuming that the destructor function is something we can 2923 // reasonably call with the correct CC. 
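// For orientation, the unregistration hook called below is AIX's unatexit(),
// which we assume behaves like:
//   extern "C" int unatexit(void (*handler)(void));
// returning 0 iff the handler was found and removed from the atexit list, in
// which case the caller becomes responsible for invoking it.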
2924 llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(Dtor); 2925 llvm::Value *NeedsDestruct = 2926 CGF.Builder.CreateIsNull(V, "needs_destruct"); 2927 2928 llvm::BasicBlock *DestructCallBlock = 2929 CGF.createBasicBlock("destruct.call"); 2930 llvm::BasicBlock *EndBlock = CGF.createBasicBlock( 2931 (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end"); 2932 // Check if unatexit returns a value of 0. If it does, jump to 2933 // DestructCallBlock, otherwise jump to EndBlock directly. 2934 CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock); 2935 2936 CGF.EmitBlock(DestructCallBlock); 2937 2938 // Emit the call to casted Dtor. 2939 llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, Dtor); 2940 // Make sure the call and the callee agree on calling convention. 2941 CI->setCallingConv(Dtor->getCallingConv()); 2942 2943 CGF.EmitBlock(EndBlock); 2944 2945 itv++; 2946 } 2947 2948 CGF.FinishFunction(); 2949 AddGlobalDtor(GlobalCleanupFn, Priority); 2950 } 2951 } 2952 2953 void CodeGenModule::registerGlobalDtorsWithAtExit() { 2954 for (const auto &I : DtorsUsingAtExit) { 2955 int Priority = I.first; 2956 std::string GlobalInitFnName = 2957 std::string("__GLOBAL_init_") + llvm::to_string(Priority); 2958 llvm::Function *GlobalInitFn = 2959 createGlobalInitOrCleanupFn(*this, GlobalInitFnName); 2960 2961 CodeGenFunction CGF(*this); 2962 CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn, 2963 getTypes().arrangeNullaryFunction(), FunctionArgList(), 2964 SourceLocation(), SourceLocation()); 2965 auto AL = ApplyDebugLocation::CreateArtificial(CGF); 2966 2967 // Since constructor functions are run in non-descending order of their 2968 // priorities, destructors are registered in non-descending order of their 2969 // priorities, and since destructor functions are run in the reverse order 2970 // of their registration, destructor functions are run in non-ascending 2971 // order of their priorities. 2972 const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second; 2973 for (auto *Dtor : Dtors) { 2974 // Register the destructor function calling __cxa_atexit if it is 2975 // available. Otherwise fall back on calling atexit. 2976 if (getCodeGenOpts().CXAAtExit) { 2977 emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false); 2978 } else { 2979 // We're assuming that the destructor function is something we can 2980 // reasonably call with the correct CC. 2981 CGF.registerGlobalDtorWithAtExit(Dtor); 2982 } 2983 } 2984 2985 CGF.FinishFunction(); 2986 AddGlobalCtor(GlobalInitFn, Priority); 2987 } 2988 2989 if (getCXXABI().useSinitAndSterm()) 2990 unregisterGlobalDtorsWithUnAtExit(); 2991 } 2992 2993 /// Register a global destructor as best as we know how. 2994 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, 2995 llvm::FunctionCallee dtor, 2996 llvm::Constant *addr) { 2997 if (D.isNoDestroy(CGM.getContext())) 2998 return; 2999 3000 // HLSL doesn't support atexit. 3001 if (CGM.getLangOpts().HLSL) 3002 return CGM.AddCXXDtorEntry(dtor, addr); 3003 3004 // OpenMP offloading supports C++ constructors and destructors but we do not 3005 // always have 'atexit' available. Instead lower these to use the LLVM global 3006 // destructors which we can handle directly in the runtime. Note that this is 3007 // not strictly 1-to-1 with using `atexit` because we no longer tear down 3008 // globals in reverse order of when they were constructed. 
3009 if (!CGM.getLangOpts().hasAtExit() && !D.isStaticLocal())
3010 return CGF.registerGlobalDtorWithLLVM(D, dtor, addr);
3011
3012 // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
3013 // or __cxa_atexit depending on whether this VarDecl uses thread-local storage
3014 // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
3015 // We can always use __cxa_thread_atexit.
3016 if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
3017 return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
3018
3019 // In Apple kexts, we want to add a global destructor entry.
3020 // FIXME: shouldn't this be guarded by some variable?
3021 if (CGM.getLangOpts().AppleKext) {
3022 // Generate a global destructor entry.
3023 return CGM.AddCXXDtorEntry(dtor, addr);
3024 }
3025
3026 CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
3027 }
3028
3029 static bool isThreadWrapperReplaceable(const VarDecl *VD,
3030 CodeGen::CodeGenModule &CGM) {
3031 assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
3032 // Darwin prefers references to thread local variables to go through
3033 // the thread wrapper instead of directly referencing the backing variable.
3034 return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3035 CGM.getTarget().getTriple().isOSDarwin();
3036 }
3037
3038 /// Get the appropriate linkage for the wrapper function. This is essentially
3039 /// the weak form of the variable's linkage; every translation unit which needs
3040 /// the wrapper emits a copy, and we want the linker to merge them.
3041 static llvm::GlobalValue::LinkageTypes
3042 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
3043 llvm::GlobalValue::LinkageTypes VarLinkage =
3044 CGM.getLLVMLinkageVarDefinition(VD);
3045
3046 // For internal linkage variables, we don't need an external or weak wrapper.
3047 if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
3048 return VarLinkage;
3049
3050 // If the thread wrapper is replaceable, give it appropriate linkage.
3051 if (isThreadWrapperReplaceable(VD, CGM))
3052 if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
3053 !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
3054 return VarLinkage;
3055 return llvm::GlobalValue::WeakODRLinkage;
3056 }
3057
3058 llvm::Function *
3059 ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
3060 llvm::Value *Val) {
3061 // Mangle the name for the thread_local wrapper function.
3062 SmallString<256> WrapperName;
3063 {
3064 llvm::raw_svector_ostream Out(WrapperName);
3065 getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
3066 }
3067
3068 // FIXME: If VD is a definition, we should regenerate the function attributes
3069 // before returning.
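// (For reference: under the Itanium TLS mangling extension, a variable 'x'
// gets a wrapper function named roughly _ZTW1x and a dynamic initializer
// named _ZTH1x; the exact spellings come from the mangler calls above and
// below.)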
3070 if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName)) 3071 return cast<llvm::Function>(V); 3072 3073 QualType RetQT = VD->getType(); 3074 if (RetQT->isReferenceType()) 3075 RetQT = RetQT.getNonReferenceType(); 3076 3077 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration( 3078 getContext().getPointerType(RetQT), FunctionArgList()); 3079 3080 llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI); 3081 llvm::Function *Wrapper = 3082 llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM), 3083 WrapperName.str(), &CGM.getModule()); 3084 3085 if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker()) 3086 Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName())); 3087 3088 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false); 3089 3090 // Always resolve references to the wrapper at link time. 3091 if (!Wrapper->hasLocalLinkage()) 3092 if (!isThreadWrapperReplaceable(VD, CGM) || 3093 llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) || 3094 llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) || 3095 VD->getVisibility() == HiddenVisibility) 3096 Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility); 3097 3098 if (isThreadWrapperReplaceable(VD, CGM)) { 3099 Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS); 3100 Wrapper->addFnAttr(llvm::Attribute::NoUnwind); 3101 } 3102 3103 ThreadWrappers.push_back({VD, Wrapper}); 3104 return Wrapper; 3105 } 3106 3107 void ItaniumCXXABI::EmitThreadLocalInitFuncs( 3108 CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals, 3109 ArrayRef<llvm::Function *> CXXThreadLocalInits, 3110 ArrayRef<const VarDecl *> CXXThreadLocalInitVars) { 3111 llvm::Function *InitFunc = nullptr; 3112 3113 // Separate initializers into those with ordered (or partially-ordered) 3114 // initialization and those with unordered initialization. 3115 llvm::SmallVector<llvm::Function *, 8> OrderedInits; 3116 llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits; 3117 for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) { 3118 if (isTemplateInstantiation( 3119 CXXThreadLocalInitVars[I]->getTemplateSpecializationKind())) 3120 UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] = 3121 CXXThreadLocalInits[I]; 3122 else 3123 OrderedInits.push_back(CXXThreadLocalInits[I]); 3124 } 3125 3126 if (!OrderedInits.empty()) { 3127 // Generate a guarded initialization function. 3128 llvm::FunctionType *FTy = 3129 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 3130 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); 3131 InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI, 3132 SourceLocation(), 3133 /*TLS=*/true); 3134 llvm::GlobalVariable *Guard = new llvm::GlobalVariable( 3135 CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false, 3136 llvm::GlobalVariable::InternalLinkage, 3137 llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard"); 3138 Guard->setThreadLocal(true); 3139 Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel()); 3140 3141 CharUnits GuardAlign = CharUnits::One(); 3142 Guard->setAlignment(GuardAlign.getAsAlign()); 3143 3144 CodeGenFunction(CGM).GenerateCXXGlobalInitFunc( 3145 InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign)); 3146 // On Darwin platforms, use CXX_FAST_TLS calling convention. 
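// (CXX_FAST_TLS is a calling convention that preserves nearly all registers,
// keeping the common already-initialized path of a TLS access cheap at every
// call site; it is applied to both the wrapper and the init function.)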
3147 if (CGM.getTarget().getTriple().isOSDarwin()) { 3148 InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS); 3149 InitFunc->addFnAttr(llvm::Attribute::NoUnwind); 3150 } 3151 } 3152 3153 // Create declarations for thread wrappers for all thread-local variables 3154 // with non-discardable definitions in this translation unit. 3155 for (const VarDecl *VD : CXXThreadLocals) { 3156 if (VD->hasDefinition() && 3157 !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) { 3158 llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD)); 3159 getOrCreateThreadLocalWrapper(VD, GV); 3160 } 3161 } 3162 3163 // Emit all referenced thread wrappers. 3164 for (auto VDAndWrapper : ThreadWrappers) { 3165 const VarDecl *VD = VDAndWrapper.first; 3166 llvm::GlobalVariable *Var = 3167 cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD))); 3168 llvm::Function *Wrapper = VDAndWrapper.second; 3169 3170 // Some targets require that all access to thread local variables go through 3171 // the thread wrapper. This means that we cannot attempt to create a thread 3172 // wrapper or a thread helper. 3173 if (!VD->hasDefinition()) { 3174 if (isThreadWrapperReplaceable(VD, CGM)) { 3175 Wrapper->setLinkage(llvm::Function::ExternalLinkage); 3176 continue; 3177 } 3178 3179 // If this isn't a TU in which this variable is defined, the thread 3180 // wrapper is discardable. 3181 if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage) 3182 Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage); 3183 } 3184 3185 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper); 3186 3187 // Mangle the name for the thread_local initialization function. 3188 SmallString<256> InitFnName; 3189 { 3190 llvm::raw_svector_ostream Out(InitFnName); 3191 getMangleContext().mangleItaniumThreadLocalInit(VD, Out); 3192 } 3193 3194 llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false); 3195 3196 // If we have a definition for the variable, emit the initialization 3197 // function as an alias to the global Init function (if any). Otherwise, 3198 // produce a declaration of the initialization function. 3199 llvm::GlobalValue *Init = nullptr; 3200 bool InitIsInitFunc = false; 3201 bool HasConstantInitialization = false; 3202 if (!usesThreadWrapperFunction(VD)) { 3203 HasConstantInitialization = true; 3204 } else if (VD->hasDefinition()) { 3205 InitIsInitFunc = true; 3206 llvm::Function *InitFuncToUse = InitFunc; 3207 if (isTemplateInstantiation(VD->getTemplateSpecializationKind())) 3208 InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl()); 3209 if (InitFuncToUse) 3210 Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(), 3211 InitFuncToUse); 3212 } else { 3213 // Emit a weak global function referring to the initialization function. 3214 // This function will not exist if the TU defining the thread_local 3215 // variable in question does not need any dynamic initialization for 3216 // its thread_local variables. 3217 Init = llvm::Function::Create(InitFnTy, 3218 llvm::GlobalVariable::ExternalWeakLinkage, 3219 InitFnName.str(), &CGM.getModule()); 3220 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction(); 3221 CGM.SetLLVMFunctionAttributes( 3222 GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false); 3223 } 3224 3225 if (Init) { 3226 Init->setVisibility(Var->getVisibility()); 3227 // Don't mark an extern_weak function DSO local on windows. 
3228 if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
3229 Init->setDSOLocal(Var->isDSOLocal());
3230 }
3231
3232 llvm::LLVMContext &Context = CGM.getModule().getContext();
3233
3234 // The linker on AIX is not happy with missing weak symbols. However,
3235 // other TUs will not know whether the initialization routine exists,
3236 // so create an empty init function to satisfy the linker.
3237 // This is needed whenever a thread wrapper function is not used, and
3238 // also when the symbol is weak.
3239 if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
3240 isEmittedWithConstantInitializer(VD, true) &&
3241 !mayNeedDestruction(VD)) {
3242 // Init should be null. If it were non-null, then the logic above would
3243 // either be defining the function to be an alias or declaring the
3244 // function with the expectation that the definition of the variable
3245 // is elsewhere.
3246 assert(Init == nullptr && "Expected Init to be null.");
3247
3248 llvm::Function *Func = llvm::Function::Create(
3249 InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
3250 const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
3251 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
3252 cast<llvm::Function>(Func),
3253 /*IsThunk=*/false);
3254 // Create a function body that just returns.
3255 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
3256 CGBuilderTy Builder(CGM, Entry);
3257 Builder.CreateRetVoid();
3258 }
3259
3260 llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
3261 CGBuilderTy Builder(CGM, Entry);
3262 if (HasConstantInitialization) {
3263 // No dynamic initialization to invoke.
3264 } else if (InitIsInitFunc) {
3265 if (Init) {
3266 llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
3267 if (isThreadWrapperReplaceable(VD, CGM)) {
3268 CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3269 llvm::Function *Fn =
3270 cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
3271 Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
3272 }
3273 }
3274 } else if (CGM.getTriple().isOSAIX()) {
3275 // On AIX, unless the variable is constinit and is neither of class type
3276 // nor of (possibly multi-dimensional) array of class type, thread_local
3277 // vars will have init routines regardless of whether they are
3278 // const-initialized. Since the routine is guaranteed to exist, we can
3279 // unconditionally call it without testing for its existence. This
3280 // avoids potentially unresolved weak symbols which the AIX linker
3281 // isn't happy with.
3282 Builder.CreateCall(InitFnTy, Init);
3283 } else {
3284 // Don't know whether we have an init function. Call it if it exists.
3285 llvm::Value *Have = Builder.CreateIsNotNull(Init);
3286 llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3287 llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
3288 Builder.CreateCondBr(Have, InitBB, ExitBB);
3289
3290 Builder.SetInsertPoint(InitBB);
3291 Builder.CreateCall(InitFnTy, Init);
3292 Builder.CreateBr(ExitBB);
3293
3294 Builder.SetInsertPoint(ExitBB);
3295 }
3296
3297 // For a reference, the result of the wrapper function is a pointer to
3298 // the referenced object.
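// As a rough sketch (hypothetical names, not literally what is emitted), the
// wrapper finished below behaves like:
//   Widget *_ZTW1w() {            // for: thread_local Widget w = make();
//     if (dynamic init needed) _ZTH1w();
//     return &w;                  // for a reference, the loaded referent
//   }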
3299 llvm::Value *Val = Builder.CreateThreadLocalAddress(Var); 3300 3301 if (VD->getType()->isReferenceType()) { 3302 CharUnits Align = CGM.getContext().getDeclAlign(VD); 3303 Val = Builder.CreateAlignedLoad(Var->getValueType(), Val, Align); 3304 } 3305 Val = Builder.CreateAddrSpaceCast(Val, Wrapper->getReturnType()); 3306 3307 Builder.CreateRet(Val); 3308 } 3309 } 3310 3311 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, 3312 const VarDecl *VD, 3313 QualType LValType) { 3314 llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD); 3315 llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val); 3316 3317 llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper); 3318 CallVal->setCallingConv(Wrapper->getCallingConv()); 3319 3320 LValue LV; 3321 if (VD->getType()->isReferenceType()) 3322 LV = CGF.MakeNaturalAlignRawAddrLValue(CallVal, LValType); 3323 else 3324 LV = CGF.MakeRawAddrLValue(CallVal, LValType, 3325 CGF.getContext().getDeclAlign(VD)); 3326 // FIXME: need setObjCGCLValueClass? 3327 return LV; 3328 } 3329 3330 /// Return whether the given global decl needs a VTT parameter, which it does 3331 /// if it's a base constructor or destructor with virtual bases. 3332 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { 3333 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 3334 3335 // We don't have any virtual bases, just return early. 3336 if (!MD->getParent()->getNumVBases()) 3337 return false; 3338 3339 // Check if we have a base constructor. 3340 if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base) 3341 return true; 3342 3343 // Check if we have a base destructor. 3344 if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) 3345 return true; 3346 3347 return false; 3348 } 3349 3350 llvm::Constant * 3351 ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) { 3352 SmallString<256> MethodName; 3353 llvm::raw_svector_ostream Out(MethodName); 3354 getMangleContext().mangleCXXName(MD, Out); 3355 MethodName += "_vfpthunk_"; 3356 StringRef ThunkName = MethodName.str(); 3357 llvm::Function *ThunkFn; 3358 if ((ThunkFn = cast_or_null<llvm::Function>( 3359 CGM.getModule().getNamedValue(ThunkName)))) 3360 return ThunkFn; 3361 3362 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD); 3363 llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo); 3364 llvm::GlobalValue::LinkageTypes Linkage = 3365 MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage 3366 : llvm::GlobalValue::InternalLinkage; 3367 ThunkFn = 3368 llvm::Function::Create(ThunkTy, Linkage, ThunkName, &CGM.getModule()); 3369 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage) 3370 ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility); 3371 assert(ThunkFn->getName() == ThunkName && "name was uniqued!"); 3372 3373 CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/true); 3374 CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn); 3375 3376 // Stack protection sometimes gets inserted after the musttail call. 3377 ThunkFn->removeFnAttr(llvm::Attribute::StackProtect); 3378 ThunkFn->removeFnAttr(llvm::Attribute::StackProtectStrong); 3379 ThunkFn->removeFnAttr(llvm::Attribute::StackProtectReq); 3380 3381 // Start codegen. 3382 CodeGenFunction CGF(CGM); 3383 CGF.CurGD = GlobalDecl(MD); 3384 CGF.CurFuncIsThunk = true; 3385 3386 // Build FunctionArgs. 
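// Conceptually, the thunk built here hides virtual dispatch behind an
// ordinary function pointer; as a sketch (hypothetical names):
//   ret_t thunk(Base *this, Args ...args) {
//     [[clang::musttail]] return (this->*<vtable slot for MD>)(args...);
//   }
// The musttail call lets the thunk forward any prototype without needing to
// know how to copy the arguments.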
3387 FunctionArgList FunctionArgs;
3388 CGF.BuildFunctionArgList(CGF.CurGD, FunctionArgs);
3389
3390 CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
3391 FunctionArgs, MD->getLocation(), SourceLocation());
3392 llvm::Value *ThisVal = loadIncomingCXXThis(CGF);
3393 setCXXABIThisValue(CGF, ThisVal);
3394
3395 CallArgList CallArgs;
3396 for (const VarDecl *VD : FunctionArgs)
3397 CGF.EmitDelegateCallArg(CallArgs, VD, SourceLocation());
3398
3399 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
3400 RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, /*this*/ 1);
3401 const CGFunctionInfo &CallInfo =
3402 CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT, Required, 0);
3403 CGCallee Callee = CGCallee::forVirtual(nullptr, GlobalDecl(MD),
3404 getThisAddress(CGF), ThunkTy);
3405 llvm::CallBase *CallOrInvoke;
3406 CGF.EmitCall(CallInfo, Callee, ReturnValueSlot(), CallArgs, &CallOrInvoke,
3407 /*IsMustTail=*/true, SourceLocation(), true);
3408 auto *Call = cast<llvm::CallInst>(CallOrInvoke);
3409 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
3410 if (Call->getType()->isVoidTy())
3411 CGF.Builder.CreateRetVoid();
3412 else
3413 CGF.Builder.CreateRet(Call);
3414
3415 // Finish the function to maintain CodeGenFunction invariants.
3416 // FIXME: Don't emit unreachable code.
3417 CGF.EmitBlock(CGF.createBasicBlock());
3418 CGF.FinishFunction();
3419 return ThunkFn;
3420 }
3421
3422 namespace {
3423 class ItaniumRTTIBuilder {
3424 CodeGenModule &CGM; // Per-module state.
3425 llvm::LLVMContext &VMContext;
3426 const ItaniumCXXABI &CXXABI; // Per-module state.
3427
3428 /// Fields - The fields of the RTTI descriptor currently being built.
3429 SmallVector<llvm::Constant *, 16> Fields;
3430
3431 /// GetAddrOfTypeName - Returns the mangled type name of the given type.
3432 llvm::GlobalVariable *
3433 GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
3434
3435 /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
3436 /// descriptor of the given type.
3437 llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
3438
3439 /// BuildVTablePointer - Build the vtable pointer for the given type.
3440 void BuildVTablePointer(const Type *Ty, llvm::Constant *StorageAddress);
3441
3442 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3443 /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
3444 void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
3445
3446 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3447 /// classes with bases that do not satisfy the abi::__si_class_type_info
3448 /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
3449 void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
3450
3451 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
3452 /// for pointer types.
3453 void BuildPointerTypeInfo(QualType PointeeTy);
3454
3455 /// BuildObjCObjectTypeInfo - Build the appropriate kind of
3456 /// type_info for an object type.
3457 void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
3458
3459 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
3460 /// struct, used for member pointer types.
3461 void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
3462
3463 public:
3464 ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
3465 : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}
3466
3467 // Pointer type info flags.
3468 enum { 3469 /// PTI_Const - Type has const qualifier. 3470 PTI_Const = 0x1, 3471 3472 /// PTI_Volatile - Type has volatile qualifier. 3473 PTI_Volatile = 0x2, 3474 3475 /// PTI_Restrict - Type has restrict qualifier. 3476 PTI_Restrict = 0x4, 3477 3478 /// PTI_Incomplete - Type is incomplete. 3479 PTI_Incomplete = 0x8, 3480 3481 /// PTI_ContainingClassIncomplete - Containing class is incomplete. 3482 /// (in pointer to member). 3483 PTI_ContainingClassIncomplete = 0x10, 3484 3485 /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS). 3486 //PTI_TransactionSafe = 0x20, 3487 3488 /// PTI_Noexcept - Pointee is noexcept function (C++1z). 3489 PTI_Noexcept = 0x40, 3490 }; 3491 3492 // VMI type info flags. 3493 enum { 3494 /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance. 3495 VMI_NonDiamondRepeat = 0x1, 3496 3497 /// VMI_DiamondShaped - Class is diamond shaped. 3498 VMI_DiamondShaped = 0x2 3499 }; 3500 3501 // Base class type info flags. 3502 enum { 3503 /// BCTI_Virtual - Base class is virtual. 3504 BCTI_Virtual = 0x1, 3505 3506 /// BCTI_Public - Base class is public. 3507 BCTI_Public = 0x2 3508 }; 3509 3510 /// BuildTypeInfo - Build the RTTI type info struct for the given type, or 3511 /// link to an existing RTTI descriptor if one already exists. 3512 llvm::Constant *BuildTypeInfo(QualType Ty); 3513 3514 /// BuildTypeInfo - Build the RTTI type info struct for the given type. 3515 llvm::Constant *BuildTypeInfo( 3516 QualType Ty, 3517 llvm::GlobalVariable::LinkageTypes Linkage, 3518 llvm::GlobalValue::VisibilityTypes Visibility, 3519 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass); 3520 }; 3521 } 3522 3523 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName( 3524 QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) { 3525 SmallString<256> Name; 3526 llvm::raw_svector_ostream Out(Name); 3527 CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out); 3528 3529 // We know that the mangled name of the type starts at index 4 of the 3530 // mangled name of the typename, so we can just index into it in order to 3531 // get the mangled name of the type. 3532 llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext, 3533 Name.substr(4)); 3534 auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy); 3535 3536 llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable( 3537 Name, Init->getType(), Linkage, Align.getAsAlign()); 3538 3539 GV->setInitializer(Init); 3540 3541 return GV; 3542 } 3543 3544 llvm::Constant * 3545 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) { 3546 // Mangle the RTTI name. 3547 SmallString<256> Name; 3548 llvm::raw_svector_ostream Out(Name); 3549 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); 3550 3551 // Look for an existing global. 3552 llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name); 3553 3554 if (!GV) { 3555 // Create a new global variable. 3556 // Note for the future: If we would ever like to do deferred emission of 3557 // RTTI, check if emitting vtables opportunistically need any adjustment. 3558 3559 GV = new llvm::GlobalVariable( 3560 CGM.getModule(), CGM.GlobalsInt8PtrTy, 3561 /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, nullptr, Name); 3562 const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); 3563 CGM.setGVProperties(GV, RD); 3564 // Import the typeinfo symbol when all non-inline virtual methods are 3565 // imported. 
3566 if (CGM.getTarget().hasPS4DLLImportExport()) { 3567 if (RD && CXXRecordNonInlineHasAttr<DLLImportAttr>(RD)) { 3568 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass); 3569 CGM.setDSOLocal(GV); 3570 } 3571 } 3572 } 3573 3574 return GV; 3575 } 3576 3577 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type 3578 /// info for that type is defined in the standard library. 3579 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) { 3580 // Itanium C++ ABI 2.9.2: 3581 // Basic type information (e.g. for "int", "bool", etc.) will be kept in 3582 // the run-time support library. Specifically, the run-time support 3583 // library should contain type_info objects for the types X, X* and 3584 // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char, 3585 // unsigned char, signed char, short, unsigned short, int, unsigned int, 3586 // long, unsigned long, long long, unsigned long long, float, double, 3587 // long double, char16_t, char32_t, and the IEEE 754r decimal and 3588 // half-precision floating point types. 3589 // 3590 // GCC also emits RTTI for __int128. 3591 // FIXME: We do not emit RTTI information for decimal types here. 3592 3593 // Types added here must also be added to EmitFundamentalRTTIDescriptors. 3594 switch (Ty->getKind()) { 3595 case BuiltinType::Void: 3596 case BuiltinType::NullPtr: 3597 case BuiltinType::Bool: 3598 case BuiltinType::WChar_S: 3599 case BuiltinType::WChar_U: 3600 case BuiltinType::Char_U: 3601 case BuiltinType::Char_S: 3602 case BuiltinType::UChar: 3603 case BuiltinType::SChar: 3604 case BuiltinType::Short: 3605 case BuiltinType::UShort: 3606 case BuiltinType::Int: 3607 case BuiltinType::UInt: 3608 case BuiltinType::Long: 3609 case BuiltinType::ULong: 3610 case BuiltinType::LongLong: 3611 case BuiltinType::ULongLong: 3612 case BuiltinType::Half: 3613 case BuiltinType::Float: 3614 case BuiltinType::Double: 3615 case BuiltinType::LongDouble: 3616 case BuiltinType::Float16: 3617 case BuiltinType::Float128: 3618 case BuiltinType::Ibm128: 3619 case BuiltinType::Char8: 3620 case BuiltinType::Char16: 3621 case BuiltinType::Char32: 3622 case BuiltinType::Int128: 3623 case BuiltinType::UInt128: 3624 return true; 3625 3626 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \ 3627 case BuiltinType::Id: 3628 #include "clang/Basic/OpenCLImageTypes.def" 3629 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \ 3630 case BuiltinType::Id: 3631 #include "clang/Basic/OpenCLExtensionTypes.def" 3632 case BuiltinType::OCLSampler: 3633 case BuiltinType::OCLEvent: 3634 case BuiltinType::OCLClkEvent: 3635 case BuiltinType::OCLQueue: 3636 case BuiltinType::OCLReserveID: 3637 #define SVE_TYPE(Name, Id, SingletonId) \ 3638 case BuiltinType::Id: 3639 #include "clang/Basic/AArch64SVEACLETypes.def" 3640 #define PPC_VECTOR_TYPE(Name, Id, Size) \ 3641 case BuiltinType::Id: 3642 #include "clang/Basic/PPCTypes.def" 3643 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 3644 #include "clang/Basic/RISCVVTypes.def" 3645 #define WASM_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 3646 #include "clang/Basic/WebAssemblyReferenceTypes.def" 3647 #define AMDGPU_TYPE(Name, Id, SingletonId, Width, Align) case BuiltinType::Id: 3648 #include "clang/Basic/AMDGPUTypes.def" 3649 #define HLSL_INTANGIBLE_TYPE(Name, Id, SingletonId) case BuiltinType::Id: 3650 #include "clang/Basic/HLSLIntangibleTypes.def" 3651 case BuiltinType::ShortAccum: 3652 case BuiltinType::Accum: 3653 case BuiltinType::LongAccum: 3654 case 
BuiltinType::UShortAccum:
3655 case BuiltinType::UAccum:
3656 case BuiltinType::ULongAccum:
3657 case BuiltinType::ShortFract:
3658 case BuiltinType::Fract:
3659 case BuiltinType::LongFract:
3660 case BuiltinType::UShortFract:
3661 case BuiltinType::UFract:
3662 case BuiltinType::ULongFract:
3663 case BuiltinType::SatShortAccum:
3664 case BuiltinType::SatAccum:
3665 case BuiltinType::SatLongAccum:
3666 case BuiltinType::SatUShortAccum:
3667 case BuiltinType::SatUAccum:
3668 case BuiltinType::SatULongAccum:
3669 case BuiltinType::SatShortFract:
3670 case BuiltinType::SatFract:
3671 case BuiltinType::SatLongFract:
3672 case BuiltinType::SatUShortFract:
3673 case BuiltinType::SatUFract:
3674 case BuiltinType::SatULongFract:
3675 case BuiltinType::BFloat16:
3676 return false;
3677
3678 case BuiltinType::Dependent:
3679 #define BUILTIN_TYPE(Id, SingletonId)
3680 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3681 case BuiltinType::Id:
3682 #include "clang/AST/BuiltinTypes.def"
3683 llvm_unreachable("asking for RTTI for a placeholder type!");
3684
3685 case BuiltinType::ObjCId:
3686 case BuiltinType::ObjCClass:
3687 case BuiltinType::ObjCSel:
3688 llvm_unreachable("FIXME: Objective-C types are unsupported!");
3689 }
3690
3691 llvm_unreachable("Invalid BuiltinType Kind!");
3692 }
3693
3694 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3695 QualType PointeeTy = PointerTy->getPointeeType();
3696 const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3697 if (!BuiltinTy)
3698 return false;
3699
3700 // Check the qualifiers.
3701 Qualifiers Quals = PointeeTy.getQualifiers();
3702 Quals.removeConst();
3703
3704 if (!Quals.empty())
3705 return false;
3706
3707 return TypeInfoIsInStandardLibrary(BuiltinTy);
3708 }
3709
3710 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3711 /// information for the given type exists in the standard library.
3712 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3713 // Type info for builtin types is defined in the standard library.
3714 if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3715 return TypeInfoIsInStandardLibrary(BuiltinTy);
3716
3717 // Type info for some pointer types to builtin types is defined in the
3718 // standard library.
3719 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3720 return TypeInfoIsInStandardLibrary(PointerTy);
3721
3722 return false;
3723 }
3724
3725 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3726 /// the given type exists somewhere else, in which case we should not emit the
3727 /// type information in this translation unit. Assumes that it is not a
3728 /// standard-library type.
3729 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3730 QualType Ty) {
3731 ASTContext &Context = CGM.getContext();
3732
3733 // If RTTI is disabled, assume it might be disabled in the
3734 // translation unit that defines any potential key function, too.
3735 if (!Context.getLangOpts().RTTI) return false;
3736
3737 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3738 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3739 if (!RD->hasDefinition())
3740 return false;
3741
3742 if (!RD->isDynamicClass())
3743 return false;
3744
3745 // FIXME: this may need to be reconsidered if the key function
3746 // changes.
3747 // N.B. We must always emit the RTTI data ourselves if there exists a key
3748 // function.
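// (Example: for 'struct S { virtual void f(); };' with S::f as the key
// function, the TU defining S::f emits the vtable _ZTV1S together with
// _ZTI1S and _ZTS1S, so every other TU can treat S's RTTI as external;
// that is what the vtable check below detects.)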
3749 bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3750
3751 // Don't import the RTTI but emit it locally.
3752 if (CGM.getTriple().isWindowsGNUEnvironment())
3753 return false;
3754
3755 if (CGM.getVTables().isVTableExternal(RD)) {
3756 if (CGM.getTarget().hasPS4DLLImportExport())
3757 return true;
3758
3759 return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3760 ? false
3761 : true;
3762 }
3763 if (IsDLLImport)
3764 return true;
3765 }
3766
3767 return false;
3768 }
3769
3770 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3771 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3772 return !RecordTy->getDecl()->isCompleteDefinition();
3773 }
3774
3775 /// ContainsIncompleteClassType - Returns whether the given type contains an
3776 /// incomplete class type. This is true if
3777 ///
3778 /// * The given type is an incomplete class type.
3779 /// * The given type is a pointer type whose pointee type contains an
3780 /// incomplete class type.
3781 /// * The given type is a member pointer type whose class is an incomplete
3782 /// class type.
3783 /// * The given type is a member pointer type whose pointee type contains an
3784 /// incomplete class type.
3786 static bool ContainsIncompleteClassType(QualType Ty) {
3787 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3788 if (IsIncompleteClassType(RecordTy))
3789 return true;
3790 }
3791
3792 if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3793 return ContainsIncompleteClassType(PointerTy->getPointeeType());
3794
3795 if (const MemberPointerType *MemberPointerTy =
3796 dyn_cast<MemberPointerType>(Ty)) {
3797 // Check if the class type is incomplete.
3798 const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3799 if (IsIncompleteClassType(ClassType))
3800 return true;
3801
3802 return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3803 }
3804
3805 return false;
3806 }
3807
3808 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3809 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3810 // iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
3811 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3812 // Check the number of bases.
3813 if (RD->getNumBases() != 1)
3814 return false;
3815
3816 // Get the base.
3817 CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3818
3819 // Check that the base is not virtual.
3820 if (Base->isVirtual())
3821 return false;
3822
3823 // Check that the base is public.
3824 if (Base->getAccessSpecifier() != AS_public)
3825 return false;
3826
3827 // Check that the class is dynamic iff the base is.
3828 auto *BaseDecl =
3829 cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3830 if (!BaseDecl->isEmpty() &&
3831 BaseDecl->isDynamicClass() != RD->isDynamicClass())
3832 return false;
3833
3834 return true;
3835 }
3836
3837 void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
3838 llvm::Constant *StorageAddress) {
3839 // abi::__class_type_info.
3840 static const char * const ClassTypeInfo =
3841 "_ZTVN10__cxxabiv117__class_type_infoE";
3842 // abi::__si_class_type_info.
3843 static const char * const SIClassTypeInfo =
3844 "_ZTVN10__cxxabiv120__si_class_type_infoE";
3845 // abi::__vmi_class_type_info.
3846 static const char * const VMIClassTypeInfo = 3847 "_ZTVN10__cxxabiv121__vmi_class_type_infoE"; 3848 3849 const char *VTableName = nullptr; 3850 3851 switch (Ty->getTypeClass()) { 3852 #define TYPE(Class, Base) 3853 #define ABSTRACT_TYPE(Class, Base) 3854 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 3855 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 3856 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 3857 #include "clang/AST/TypeNodes.inc" 3858 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 3859 3860 case Type::LValueReference: 3861 case Type::RValueReference: 3862 llvm_unreachable("References shouldn't get here"); 3863 3864 case Type::Auto: 3865 case Type::DeducedTemplateSpecialization: 3866 llvm_unreachable("Undeduced type shouldn't get here"); 3867 3868 case Type::Pipe: 3869 llvm_unreachable("Pipe types shouldn't get here"); 3870 3871 case Type::ArrayParameter: 3872 llvm_unreachable("Array Parameter types should not get here."); 3873 3874 case Type::Builtin: 3875 case Type::BitInt: 3876 // GCC treats vector and complex types as fundamental types. 3877 case Type::Vector: 3878 case Type::ExtVector: 3879 case Type::ConstantMatrix: 3880 case Type::Complex: 3881 case Type::Atomic: 3882 // FIXME: GCC treats block pointers as fundamental types?! 3883 case Type::BlockPointer: 3884 // abi::__fundamental_type_info. 3885 VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE"; 3886 break; 3887 3888 case Type::ConstantArray: 3889 case Type::IncompleteArray: 3890 case Type::VariableArray: 3891 // abi::__array_type_info. 3892 VTableName = "_ZTVN10__cxxabiv117__array_type_infoE"; 3893 break; 3894 3895 case Type::FunctionNoProto: 3896 case Type::FunctionProto: 3897 // abi::__function_type_info. 3898 VTableName = "_ZTVN10__cxxabiv120__function_type_infoE"; 3899 break; 3900 3901 case Type::Enum: 3902 // abi::__enum_type_info. 3903 VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE"; 3904 break; 3905 3906 case Type::Record: { 3907 const CXXRecordDecl *RD = 3908 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl()); 3909 3910 if (!RD->hasDefinition() || !RD->getNumBases()) { 3911 VTableName = ClassTypeInfo; 3912 } else if (CanUseSingleInheritance(RD)) { 3913 VTableName = SIClassTypeInfo; 3914 } else { 3915 VTableName = VMIClassTypeInfo; 3916 } 3917 3918 break; 3919 } 3920 3921 case Type::ObjCObject: 3922 // Ignore protocol qualifiers. 3923 Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr(); 3924 3925 // Handle id and Class. 3926 if (isa<BuiltinType>(Ty)) { 3927 VTableName = ClassTypeInfo; 3928 break; 3929 } 3930 3931 assert(isa<ObjCInterfaceType>(Ty)); 3932 [[fallthrough]]; 3933 3934 case Type::ObjCInterface: 3935 if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) { 3936 VTableName = SIClassTypeInfo; 3937 } else { 3938 VTableName = ClassTypeInfo; 3939 } 3940 break; 3941 3942 case Type::ObjCObjectPointer: 3943 case Type::Pointer: 3944 // abi::__pointer_type_info. 3945 VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE"; 3946 break; 3947 3948 case Type::MemberPointer: 3949 // abi::__pointer_to_member_type_info. 3950 VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE"; 3951 break; 3952 3953 case Type::HLSLAttributedResource: 3954 llvm_unreachable("HLSL doesn't support virtual functions"); 3955 } 3956 3957 llvm::Constant *VTable = nullptr; 3958 3959 // Check if the alias exists. If it doesn't, then get or create the global. 
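// (Vtable layout reminder: a standard Itanium vtable group begins with
// [offset-to-top][RTTI pointer][virtual function pointers...], and a vptr
// points at the first virtual function pointer; in the relative layout the
// first two entries are 32-bit offsets instead of pointer-sized words.)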
3960 if (CGM.getItaniumVTableContext().isRelativeLayout())
3961 VTable = CGM.getModule().getNamedAlias(VTableName);
3962 if (!VTable) {
3963 llvm::Type *Ty = llvm::ArrayType::get(CGM.GlobalsInt8PtrTy, 0);
3964 VTable = CGM.getModule().getOrInsertGlobal(VTableName, Ty);
3965 }
3966
3967 CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));
3968
3969 llvm::Type *PtrDiffTy =
3970 CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
3971
3972 // The vtable address point is two entries past the start of the vtable.
3973 if (CGM.getItaniumVTableContext().isRelativeLayout()) {
3974 // The vtable address point is 8 bytes after its start:
3975 // 4 for the offset to top + 4 for the relative offset to rtti.
3976 llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
3977 VTable =
3978 llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
3979 } else {
3980 llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
3981 VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.GlobalsInt8PtrTy,
3982 VTable, Two);
3983 }
3984
3985 if (const auto &Schema =
3986 CGM.getCodeGenOpts().PointerAuth.CXXTypeInfoVTablePointer)
3987 VTable = CGM.getConstantSignedPointer(
3988 VTable, Schema,
3989 Schema.isAddressDiscriminated() ? StorageAddress : nullptr,
3990 GlobalDecl(), QualType(Ty, 0));
3991
3992 Fields.push_back(VTable);
3993 }
3994
3995 /// Return the linkage that the type info and type info name constants
3996 /// should have for the given type.
3997 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3998 QualType Ty) {
3999 // Itanium C++ ABI 2.9.5p7:
4000 // In addition, it and all of the intermediate abi::__pointer_type_info
4001 // structs in the chain down to the abi::__class_type_info for the
4002 // incomplete class type must be prevented from resolving to the
4003 // corresponding type_info structs for the complete class type, possibly
4004 // by making them local static objects. Finally, a dummy class RTTI is
4005 // generated for the incomplete type that will not resolve to the final
4006 // complete class RTTI (because the latter need not exist), possibly by
4007 // making it a local static object.
4008 if (ContainsIncompleteClassType(Ty))
4009 return llvm::GlobalValue::InternalLinkage;
4010
4011 switch (Ty->getLinkage()) {
4012 case Linkage::Invalid:
4013 llvm_unreachable("Linkage hasn't been computed!");
4014
4015 case Linkage::None:
4016 case Linkage::Internal:
4017 case Linkage::UniqueExternal:
4018 return llvm::GlobalValue::InternalLinkage;
4019
4020 case Linkage::VisibleNone:
4021 case Linkage::Module:
4022 case Linkage::External:
4023 // If RTTI is not enabled, this type info struct is only going to be
4024 // used for exception handling. Give it linkonce_odr linkage.
4025 if (!CGM.getLangOpts().RTTI)
4026 return llvm::GlobalValue::LinkOnceODRLinkage;
4027
4028 if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
4029 const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
4030 if (RD->hasAttr<WeakAttr>())
4031 return llvm::GlobalValue::WeakODRLinkage;
4032 if (CGM.getTriple().isWindowsItaniumEnvironment())
4033 if (RD->hasAttr<DLLImportAttr>() &&
4034 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4035 return llvm::GlobalValue::ExternalLinkage;
4036 // MinGW always uses LinkOnceODRLinkage for type info.
4037 if (RD->isDynamicClass() &&
4038 !CGM.getContext()
4039 .getTargetInfo()
4040 .getTriple()
4041 .isWindowsGNUEnvironment())
4042 return CGM.getVTableLinkage(RD);
4043 }
4044
4045 return llvm::GlobalValue::LinkOnceODRLinkage;
4046 }
4047
4048 llvm_unreachable("Invalid linkage!");
4049 }
4050
4051 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
4052 // We want to operate on the canonical type.
4053 Ty = Ty.getCanonicalType();
4054
4055 // Check if we've already emitted an RTTI descriptor for this type.
4056 SmallString<256> Name;
4057 llvm::raw_svector_ostream Out(Name);
4058 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
4059
4060 llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
4061 if (OldGV && !OldGV->isDeclaration()) {
4062 assert(!OldGV->hasAvailableExternallyLinkage() &&
4063 "available_externally typeinfos not yet implemented");
4064
4065 return OldGV;
4066 }
4067
4068 // Check if there is already an external RTTI descriptor for this type.
4069 if (IsStandardLibraryRTTIDescriptor(Ty) ||
4070 ShouldUseExternalRTTIDescriptor(CGM, Ty))
4071 return GetAddrOfExternalRTTIDescriptor(Ty);
4072
4073 // Compute the linkage to emit the RTTI descriptor and its name with.
4074 llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
4075
4076 // Give the type_info object and name the formal visibility of the
4077 // type itself.
4078 llvm::GlobalValue::VisibilityTypes llvmVisibility;
4079 if (llvm::GlobalValue::isLocalLinkage(Linkage))
4080 // If the linkage is local, only default visibility makes sense.
4081 llvmVisibility = llvm::GlobalValue::DefaultVisibility;
4082 else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
4083 ItaniumCXXABI::RUK_NonUniqueHidden)
4084 llvmVisibility = llvm::GlobalValue::HiddenVisibility;
4085 else
4086 llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
4087
4088 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
4089 llvm::GlobalValue::DefaultStorageClass;
4090 if (auto RD = Ty->getAsCXXRecordDecl()) {
4091 if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
4092 RD->hasAttr<DLLExportAttr>()) ||
4093 (CGM.shouldMapVisibilityToDLLExport(RD) &&
4094 !llvm::GlobalValue::isLocalLinkage(Linkage) &&
4095 llvmVisibility == llvm::GlobalValue::DefaultVisibility))
4096 DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
4097 }
4098 return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
4099 }
4100
4101 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
4102 QualType Ty,
4103 llvm::GlobalVariable::LinkageTypes Linkage,
4104 llvm::GlobalValue::VisibilityTypes Visibility,
4105 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
4106 SmallString<256> Name;
4107 llvm::raw_svector_ostream Out(Name);
4108 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
4109 llvm::Module &M = CGM.getModule();
4110 llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
4111 // int8 is an arbitrary type to be replaced later with replaceInitializer.
4112 llvm::GlobalVariable *GV =
4113 new llvm::GlobalVariable(M, CGM.Int8Ty, /*isConstant=*/true, Linkage,
4114 /*Initializer=*/nullptr, Name);
4115
4116 // Add the vtable pointer.
4117 BuildVTablePointer(cast<Type>(Ty), GV);
4118
4119 // And the name.
4120 llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
4121 llvm::Constant *TypeNameField;
4122
4123 // If we're supposed to demote the visibility, be sure to set a flag
4124 // to use a string comparison for type_info comparisons.
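// (The runtime side, e.g. libc++abi on ARM64, is assumed to honor this flag:
// when the high bit of the type name pointer is set, type_info comparison
// falls back to strcmp on the _ZTS name strings instead of comparing
// addresses.)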
4125 ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness = 4126 CXXABI.classifyRTTIUniqueness(Ty, Linkage); 4127 if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) { 4128 // The flag is the sign bit, which on ARM64 is defined to be clear 4129 // for global pointers. This is very ARM64-specific. 4130 TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty); 4131 llvm::Constant *flag = 4132 llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63); 4133 TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag); 4134 TypeNameField = 4135 llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.GlobalsInt8PtrTy); 4136 } else { 4137 TypeNameField = TypeName; 4138 } 4139 Fields.push_back(TypeNameField); 4140 4141 switch (Ty->getTypeClass()) { 4142 #define TYPE(Class, Base) 4143 #define ABSTRACT_TYPE(Class, Base) 4144 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: 4145 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: 4146 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 4147 #include "clang/AST/TypeNodes.inc" 4148 llvm_unreachable("Non-canonical and dependent types shouldn't get here"); 4149 4150 // GCC treats vector types as fundamental types. 4151 case Type::Builtin: 4152 case Type::Vector: 4153 case Type::ExtVector: 4154 case Type::ConstantMatrix: 4155 case Type::Complex: 4156 case Type::BlockPointer: 4157 // Itanium C++ ABI 2.9.5p4: 4158 // abi::__fundamental_type_info adds no data members to std::type_info. 4159 break; 4160 4161 case Type::LValueReference: 4162 case Type::RValueReference: 4163 llvm_unreachable("References shouldn't get here"); 4164 4165 case Type::Auto: 4166 case Type::DeducedTemplateSpecialization: 4167 llvm_unreachable("Undeduced type shouldn't get here"); 4168 4169 case Type::Pipe: 4170 break; 4171 4172 case Type::BitInt: 4173 break; 4174 4175 case Type::ConstantArray: 4176 case Type::IncompleteArray: 4177 case Type::VariableArray: 4178 case Type::ArrayParameter: 4179 // Itanium C++ ABI 2.9.5p5: 4180 // abi::__array_type_info adds no data members to std::type_info. 4181 break; 4182 4183 case Type::FunctionNoProto: 4184 case Type::FunctionProto: 4185 // Itanium C++ ABI 2.9.5p5: 4186 // abi::__function_type_info adds no data members to std::type_info. 4187 break; 4188 4189 case Type::Enum: 4190 // Itanium C++ ABI 2.9.5p5: 4191 // abi::__enum_type_info adds no data members to std::type_info. 4192 break; 4193 4194 case Type::Record: { 4195 const CXXRecordDecl *RD = 4196 cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl()); 4197 if (!RD->hasDefinition() || !RD->getNumBases()) { 4198 // We don't need to emit any fields. 4199 break; 4200 } 4201 4202 if (CanUseSingleInheritance(RD)) 4203 BuildSIClassTypeInfo(RD); 4204 else 4205 BuildVMIClassTypeInfo(RD); 4206 4207 break; 4208 } 4209 4210 case Type::ObjCObject: 4211 case Type::ObjCInterface: 4212 BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty)); 4213 break; 4214 4215 case Type::ObjCObjectPointer: 4216 BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType()); 4217 break; 4218 4219 case Type::Pointer: 4220 BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType()); 4221 break; 4222 4223 case Type::MemberPointer: 4224 BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty)); 4225 break; 4226 4227 case Type::Atomic: 4228 // No fields, at least for the moment. 
4229 break; 4230 4231 case Type::HLSLAttributedResource: 4232 llvm_unreachable("HLSL doesn't support RTTI"); 4233 } 4234 4235 GV->replaceInitializer(llvm::ConstantStruct::getAnon(Fields)); 4236 4237 // Export the typeinfo in the same circumstances as the vtable is exported. 4238 auto GVDLLStorageClass = DLLStorageClass; 4239 if (CGM.getTarget().hasPS4DLLImportExport() && 4240 GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) { 4241 if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { 4242 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); 4243 if (RD->hasAttr<DLLExportAttr>() || 4244 CXXRecordNonInlineHasAttr<DLLExportAttr>(RD)) 4245 GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass; 4246 } 4247 } 4248 4249 // If there's already an old global variable, replace it with the new one. 4250 if (OldGV) { 4251 GV->takeName(OldGV); 4252 OldGV->replaceAllUsesWith(GV); 4253 OldGV->eraseFromParent(); 4254 } 4255 4256 if (CGM.supportsCOMDAT() && GV->isWeakForLinker()) 4257 GV->setComdat(M.getOrInsertComdat(GV->getName())); 4258 4259 CharUnits Align = CGM.getContext().toCharUnitsFromBits( 4260 CGM.getTarget().getPointerAlign(CGM.GetGlobalVarAddressSpace(nullptr))); 4261 GV->setAlignment(Align.getAsAlign()); 4262 4263 // The Itanium ABI specifies that type_info objects must be globally 4264 // unique, with one exception: if the type is an incomplete class 4265 // type or a (possibly indirect) pointer to one. That exception 4266 // affects the general case of comparing type_info objects produced 4267 // by the typeid operator, which is why the comparison operators on 4268 // std::type_info generally use the type_info name pointers instead 4269 // of the object addresses. However, the language's built-in uses 4270 // of RTTI generally require class types to be complete, even when 4271 // manipulating pointers to those class types. This allows the 4272 // implementation of dynamic_cast to rely on address equality tests, 4273 // which is much faster. 4274 4275 // All of this is to say that it's important that both the type_info 4276 // object and the type_info name be uniqued when weakly emitted. 4277 4278 TypeName->setVisibility(Visibility); 4279 CGM.setDSOLocal(TypeName); 4280 4281 GV->setVisibility(Visibility); 4282 CGM.setDSOLocal(GV); 4283 4284 TypeName->setDLLStorageClass(DLLStorageClass); 4285 GV->setDLLStorageClass(GVDLLStorageClass); 4286 4287 TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition); 4288 GV->setPartition(CGM.getCodeGenOpts().SymbolPartition); 4289 4290 return GV; 4291 } 4292 4293 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info 4294 /// for the given Objective-C object type. 4295 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) { 4296 // Drop qualifiers. 4297 const Type *T = OT->getBaseType().getTypePtr(); 4298 assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T)); 4299 4300 // The builtin types are abi::__class_type_infos and don't require 4301 // extra fields. 4302 if (isa<BuiltinType>(T)) return; 4303 4304 ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl(); 4305 ObjCInterfaceDecl *Super = Class->getSuperClass(); 4306 4307 // Root classes are also __class_type_info. 4308 if (!Super) return; 4309 4310 QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super); 4311 4312 // Everything else is single inheritance. 
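// Per the ABI, abi::__si_class_type_info appends exactly one field to
// abi::__class_type_info, roughly:
//   class __si_class_type_info : public __class_type_info {
//     const __class_type_info *__base_type; // sketch of the runtime layout
//   };
// so a single type_info pointer is pushed below.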
  llvm::Constant *BaseTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
  Fields.push_back(BaseTypeInfo);
}

/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
  // Itanium C++ ABI 2.9.5p6b:
  //   It adds to abi::__class_type_info a single member pointing to the
  //   type_info structure for the base type,
  llvm::Constant *BaseTypeInfo =
      ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
  Fields.push_back(BaseTypeInfo);
}

namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}

/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
/// abi::__vmi_class_type_info.
static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
                                             SeenBases &Bases) {
  unsigned Flags = 0;

  auto *BaseDecl =
      cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());

  if (Base->isVirtual()) {
    // Mark the virtual base as seen.
    if (!Bases.VirtualBases.insert(BaseDecl).second) {
      // If this virtual base has been seen before, then the class is diamond
      // shaped.
      Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
    } else {
      if (Bases.NonVirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  } else {
    // Mark the non-virtual base as seen.
    if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
      // If this non-virtual base has been seen before, then the class has
      // non-diamond shaped repeated inheritance.
      Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    } else {
      if (Bases.VirtualBases.count(BaseDecl))
        Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
    }
  }

  // Walk all bases.
  for (const auto &I : BaseDecl->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}

static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
  unsigned Flags = 0;
  SeenBases Bases;

  // Walk all bases.
  for (const auto &I : RD->bases())
    Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);

  return Flags;
}

/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
/// classes with bases that do not satisfy the abi::__si_class_type_info
/// constraints, according to the Itanium C++ ABI, 2.9.5p6c.
void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
  llvm::Type *UnsignedIntLTy =
      CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);

  // Itanium C++ ABI 2.9.5p6c:
  //   __flags is a word with flags describing details about the class
  //   structure, which may be referenced by using the __flags_masks
  //   enumeration. These flags refer to both direct and indirect bases.
  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_count is a word with the number of direct proper base class
  //   descriptions that follow.
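  // Illustrative example (hypothetical classes): given
  //   struct A {}; struct B {};
  //   struct D : A, virtual B {};
  // __base_count for D is 2, and the loop below emits one
  // __base_class_type_info entry for A and one for B.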
  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));

  if (!RD->getNumBases())
    return;

  // Now add the base class descriptions.

  // Itanium C++ ABI 2.9.5p6c:
  //   __base_info[] is an array of base class descriptions -- one for every
  //   direct proper base. Each description is of the type:
  //
  //   struct abi::__base_class_type_info {
  //   public:
  //     const __class_type_info *__base_type;
  //     long __offset_flags;
  //
  //     enum __offset_flags_masks {
  //       __virtual_mask = 0x1,
  //       __public_mask = 0x2,
  //       __offset_shift = 8
  //     };
  //   };

  // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
  // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
  // LLP64 platforms.
  // FIXME: Consider updating libc++abi to match, and extend this logic to all
  // LLP64 platforms.
  QualType OffsetFlagsTy = CGM.getContext().LongTy;
  const TargetInfo &TI = CGM.getContext().getTargetInfo();
  if (TI.getTriple().isOSCygMing() &&
      TI.getPointerWidth(LangAS::Default) > TI.getLongWidth())
    OffsetFlagsTy = CGM.getContext().LongLongTy;
  llvm::Type *OffsetFlagsLTy = CGM.getTypes().ConvertType(OffsetFlagsTy);

  for (const auto &Base : RD->bases()) {
    // The __base_type member points to the RTTI for the base type.
    Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));

    auto *BaseDecl =
        cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());

    int64_t OffsetFlags = 0;

    // All but the lower 8 bits of __offset_flags are a signed offset.
    // For a non-virtual base, this is the offset in the object of the base
    // subobject. For a virtual base, this is the offset in the virtual table
    // of the virtual base offset for the virtual base referenced (negative).
    CharUnits Offset;
    if (Base.isVirtual())
      Offset = CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(
          RD, BaseDecl);
    else {
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
      Offset = Layout.getBaseClassOffset(BaseDecl);
    }

    OffsetFlags = uint64_t(Offset.getQuantity()) << 8;

    // The low-order byte of __offset_flags contains flags, as given by the
    // masks from the enumeration __offset_flags_masks.
    if (Base.isVirtual())
      OffsetFlags |= BCTI_Virtual;
    if (Base.getAccessSpecifier() == AS_public)
      OffsetFlags |= BCTI_Public;

    Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
  }
}

/// Compute the flags for a __pbase_type_info, and remove the corresponding
/// pieces from \p Type.
static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
  unsigned Flags = 0;

  if (Type.isConstQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Const;
  if (Type.isVolatileQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Volatile;
  if (Type.isRestrictQualified())
    Flags |= ItaniumRTTIBuilder::PTI_Restrict;
  Type = Type.getUnqualifiedType();

  // Itanium C++ ABI 2.9.5p7:
  //   When the abi::__pbase_type_info is for a direct or indirect pointer to
  //   an incomplete class type, the incomplete target type flag is set.
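  // Illustrative example (hypothetical type): given
  //   struct Incomplete;          // no definition in this TU
  // a __pbase_type_info for 'Incomplete **' gets PTI_Incomplete from the
  // check below, because the pointee chain ends in an incomplete class type.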
4493 if (ContainsIncompleteClassType(Type)) 4494 Flags |= ItaniumRTTIBuilder::PTI_Incomplete; 4495 4496 if (auto *Proto = Type->getAs<FunctionProtoType>()) { 4497 if (Proto->isNothrow()) { 4498 Flags |= ItaniumRTTIBuilder::PTI_Noexcept; 4499 Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None); 4500 } 4501 } 4502 4503 return Flags; 4504 } 4505 4506 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, 4507 /// used for pointer types. 4508 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) { 4509 // Itanium C++ ABI 2.9.5p7: 4510 // __flags is a flag word describing the cv-qualification and other 4511 // attributes of the type pointed to 4512 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 4513 4514 llvm::Type *UnsignedIntLTy = 4515 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 4516 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 4517 4518 // Itanium C++ ABI 2.9.5p7: 4519 // __pointee is a pointer to the std::type_info derivation for the 4520 // unqualified type being pointed to. 4521 llvm::Constant *PointeeTypeInfo = 4522 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 4523 Fields.push_back(PointeeTypeInfo); 4524 } 4525 4526 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info 4527 /// struct, used for member pointer types. 4528 void 4529 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) { 4530 QualType PointeeTy = Ty->getPointeeType(); 4531 4532 // Itanium C++ ABI 2.9.5p7: 4533 // __flags is a flag word describing the cv-qualification and other 4534 // attributes of the type pointed to. 4535 unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy); 4536 4537 const RecordType *ClassType = cast<RecordType>(Ty->getClass()); 4538 if (IsIncompleteClassType(ClassType)) 4539 Flags |= PTI_ContainingClassIncomplete; 4540 4541 llvm::Type *UnsignedIntLTy = 4542 CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); 4543 Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); 4544 4545 // Itanium C++ ABI 2.9.5p7: 4546 // __pointee is a pointer to the std::type_info derivation for the 4547 // unqualified type being pointed to. 4548 llvm::Constant *PointeeTypeInfo = 4549 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy); 4550 Fields.push_back(PointeeTypeInfo); 4551 4552 // Itanium C++ ABI 2.9.5p9: 4553 // __context is a pointer to an abi::__class_type_info corresponding to the 4554 // class type containing the member pointed to 4555 // (e.g., the "A" in "int A::*"). 4556 Fields.push_back( 4557 ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0))); 4558 } 4559 4560 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) { 4561 return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty); 4562 } 4563 4564 void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) { 4565 // Types added here must also be added to TypeInfoIsInStandardLibrary. 
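  // A sketch of what the loop below produces: for each fundamental type T,
  // RTTI is emitted for T, T*, and const T*; for int, for example, that is
  // _ZTIi, _ZTIPi, and _ZTIPKi, the symbols normally provided by the C++
  // runtime library.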
4566 QualType FundamentalTypes[] = { 4567 getContext().VoidTy, getContext().NullPtrTy, 4568 getContext().BoolTy, getContext().WCharTy, 4569 getContext().CharTy, getContext().UnsignedCharTy, 4570 getContext().SignedCharTy, getContext().ShortTy, 4571 getContext().UnsignedShortTy, getContext().IntTy, 4572 getContext().UnsignedIntTy, getContext().LongTy, 4573 getContext().UnsignedLongTy, getContext().LongLongTy, 4574 getContext().UnsignedLongLongTy, getContext().Int128Ty, 4575 getContext().UnsignedInt128Ty, getContext().HalfTy, 4576 getContext().FloatTy, getContext().DoubleTy, 4577 getContext().LongDoubleTy, getContext().Float128Ty, 4578 getContext().Char8Ty, getContext().Char16Ty, 4579 getContext().Char32Ty 4580 }; 4581 llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass = 4582 RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD) 4583 ? llvm::GlobalValue::DLLExportStorageClass 4584 : llvm::GlobalValue::DefaultStorageClass; 4585 llvm::GlobalValue::VisibilityTypes Visibility = 4586 CodeGenModule::GetLLVMVisibility(RD->getVisibility()); 4587 for (const QualType &FundamentalType : FundamentalTypes) { 4588 QualType PointerType = getContext().getPointerType(FundamentalType); 4589 QualType PointerTypeConst = getContext().getPointerType( 4590 FundamentalType.withConst()); 4591 for (QualType Type : {FundamentalType, PointerType, PointerTypeConst}) 4592 ItaniumRTTIBuilder(*this).BuildTypeInfo( 4593 Type, llvm::GlobalValue::ExternalLinkage, 4594 Visibility, DLLStorageClass); 4595 } 4596 } 4597 4598 /// What sort of uniqueness rules should we use for the RTTI for the 4599 /// given type? 4600 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness( 4601 QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const { 4602 if (shouldRTTIBeUnique()) 4603 return RUK_Unique; 4604 4605 // It's only necessary for linkonce_odr or weak_odr linkage. 4606 if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage && 4607 Linkage != llvm::GlobalValue::WeakODRLinkage) 4608 return RUK_Unique; 4609 4610 // It's only necessary with default visibility. 4611 if (CanTy->getVisibility() != DefaultVisibility) 4612 return RUK_Unique; 4613 4614 // If we're not required to publish this symbol, hide it. 4615 if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage) 4616 return RUK_NonUniqueHidden; 4617 4618 // If we're required to publish this symbol, as we might be under an 4619 // explicit instantiation, leave it with default visibility but 4620 // enable string-comparisons. 4621 assert(Linkage == llvm::GlobalValue::WeakODRLinkage); 4622 return RUK_NonUniqueVisible; 4623 } 4624 4625 // Find out how to codegen the complete destructor and constructor 4626 namespace { 4627 enum class StructorCodegen { Emit, RAUW, Alias, COMDAT }; 4628 } 4629 static StructorCodegen getCodegenToUse(CodeGenModule &CGM, 4630 const CXXMethodDecl *MD) { 4631 if (!CGM.getCodeGenOpts().CXXCtorDtorAliases) 4632 return StructorCodegen::Emit; 4633 4634 // The complete and base structors are not equivalent if there are any virtual 4635 // bases, so emit separate functions. 
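  // For example (illustrative):
  //   struct A { ~A(); };
  //   struct B : virtual A { ~B(); };
  // B's complete destructor (D1) must destroy the virtual A subobject, while
  // B's base destructor (D2) must not, so the two cannot share one symbol.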
4636 if (MD->getParent()->getNumVBases()) 4637 return StructorCodegen::Emit; 4638 4639 GlobalDecl AliasDecl; 4640 if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) { 4641 AliasDecl = GlobalDecl(DD, Dtor_Complete); 4642 } else { 4643 const auto *CD = cast<CXXConstructorDecl>(MD); 4644 AliasDecl = GlobalDecl(CD, Ctor_Complete); 4645 } 4646 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl); 4647 4648 if (llvm::GlobalValue::isDiscardableIfUnused(Linkage)) 4649 return StructorCodegen::RAUW; 4650 4651 // FIXME: Should we allow available_externally aliases? 4652 if (!llvm::GlobalAlias::isValidLinkage(Linkage)) 4653 return StructorCodegen::RAUW; 4654 4655 if (llvm::GlobalValue::isWeakForLinker(Linkage)) { 4656 // Only ELF and wasm support COMDATs with arbitrary names (C5/D5). 4657 if (CGM.getTarget().getTriple().isOSBinFormatELF() || 4658 CGM.getTarget().getTriple().isOSBinFormatWasm()) 4659 return StructorCodegen::COMDAT; 4660 return StructorCodegen::Emit; 4661 } 4662 4663 return StructorCodegen::Alias; 4664 } 4665 4666 static void emitConstructorDestructorAlias(CodeGenModule &CGM, 4667 GlobalDecl AliasDecl, 4668 GlobalDecl TargetDecl) { 4669 llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl); 4670 4671 StringRef MangledName = CGM.getMangledName(AliasDecl); 4672 llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName); 4673 if (Entry && !Entry->isDeclaration()) 4674 return; 4675 4676 auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl)); 4677 4678 // Create the alias with no name. 4679 auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee); 4680 4681 // Constructors and destructors are always unnamed_addr. 4682 Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); 4683 4684 // Switch any previous uses to the alias. 4685 if (Entry) { 4686 assert(Entry->getType() == Aliasee->getType() && 4687 "declaration exists with different type"); 4688 Alias->takeName(Entry); 4689 Entry->replaceAllUsesWith(Alias); 4690 Entry->eraseFromParent(); 4691 } else { 4692 Alias->setName(MangledName); 4693 } 4694 4695 // Finally, set up the alias with its proper name and attributes. 4696 CGM.SetCommonAttributes(AliasDecl, Alias); 4697 } 4698 4699 void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) { 4700 auto *MD = cast<CXXMethodDecl>(GD.getDecl()); 4701 auto *CD = dyn_cast<CXXConstructorDecl>(MD); 4702 const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD); 4703 4704 StructorCodegen CGType = getCodegenToUse(CGM, MD); 4705 4706 if (CD ? GD.getCtorType() == Ctor_Complete 4707 : GD.getDtorType() == Dtor_Complete) { 4708 GlobalDecl BaseDecl; 4709 if (CD) 4710 BaseDecl = GD.getWithCtorType(Ctor_Base); 4711 else 4712 BaseDecl = GD.getWithDtorType(Dtor_Base); 4713 4714 if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) { 4715 emitConstructorDestructorAlias(CGM, GD, BaseDecl); 4716 return; 4717 } 4718 4719 if (CGType == StructorCodegen::RAUW) { 4720 StringRef MangledName = CGM.getMangledName(GD); 4721 auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl); 4722 CGM.addReplacement(MangledName, Aliasee); 4723 return; 4724 } 4725 } 4726 4727 // The base destructor is equivalent to the base destructor of its 4728 // base class if there is exactly one non-virtual base class with a 4729 // non-trivial destructor, there are no fields with a non-trivial 4730 // destructor, and the body of the destructor is trivial. 
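  // Illustrative case:
  //   struct A { ~A(); };
  //   struct B : A { ~B() {} };
  // B's base destructor only invokes A's base destructor, so (layout
  // permitting) TryEmitBaseDestructorAsAlias can emit _ZN1BD2Ev as an alias
  // of _ZN1AD2Ev instead of a separate body.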
4731 if (DD && GD.getDtorType() == Dtor_Base && 4732 CGType != StructorCodegen::COMDAT && 4733 !CGM.TryEmitBaseDestructorAsAlias(DD)) 4734 return; 4735 4736 // FIXME: The deleting destructor is equivalent to the selected operator 4737 // delete if: 4738 // * either the delete is a destroying operator delete or the destructor 4739 // would be trivial if it weren't virtual, 4740 // * the conversion from the 'this' parameter to the first parameter of the 4741 // destructor is equivalent to a bitcast, 4742 // * the destructor does not have an implicit "this" return, and 4743 // * the operator delete has the same calling convention and IR function type 4744 // as the destructor. 4745 // In such cases we should try to emit the deleting dtor as an alias to the 4746 // selected 'operator delete'. 4747 4748 llvm::Function *Fn = CGM.codegenCXXStructor(GD); 4749 4750 if (CGType == StructorCodegen::COMDAT) { 4751 SmallString<256> Buffer; 4752 llvm::raw_svector_ostream Out(Buffer); 4753 if (DD) 4754 getMangleContext().mangleCXXDtorComdat(DD, Out); 4755 else 4756 getMangleContext().mangleCXXCtorComdat(CD, Out); 4757 llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str()); 4758 Fn->setComdat(C); 4759 } else { 4760 CGM.maybeSetTrivialComdat(*MD, *Fn); 4761 } 4762 } 4763 4764 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) { 4765 // void *__cxa_begin_catch(void*); 4766 llvm::FunctionType *FTy = llvm::FunctionType::get( 4767 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4768 4769 return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); 4770 } 4771 4772 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) { 4773 // void __cxa_end_catch(); 4774 llvm::FunctionType *FTy = 4775 llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); 4776 4777 return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch"); 4778 } 4779 4780 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) { 4781 // void *__cxa_get_exception_ptr(void*); 4782 llvm::FunctionType *FTy = llvm::FunctionType::get( 4783 CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false); 4784 4785 return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); 4786 } 4787 4788 namespace { 4789 /// A cleanup to call __cxa_end_catch. In many cases, the caught 4790 /// exception type lets us state definitively that the thrown exception 4791 /// type does not have a destructor. In particular: 4792 /// - Catch-alls tell us nothing, so we have to conservatively 4793 /// assume that the thrown exception might have a destructor. 4794 /// - Catches by reference behave according to their base types. 4795 /// - Catches of non-record types will only trigger for exceptions 4796 /// of non-record types, which never have destructors. 4797 /// - Catches of record types can trigger for arbitrary subclasses 4798 /// of the caught type, so we have to assume the actual thrown 4799 /// exception type might have a throwing destructor, even if the 4800 /// caught type's destructor is trivial or nothrow. 4801 struct CallEndCatch final : EHScopeStack::Cleanup { 4802 CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} 4803 bool MightThrow; 4804 4805 void Emit(CodeGenFunction &CGF, Flags flags) override { 4806 if (!MightThrow) { 4807 CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); 4808 return; 4809 } 4810 4811 CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM)); 4812 } 4813 }; 4814 } 4815 4816 /// Emits a call to __cxa_begin_catch and enters a cleanup to call 4817 /// __cxa_end_catch. 
If -fassume-nothrow-exception-dtor is specified, we assume 4818 /// that the exception object's dtor is nothrow, therefore the __cxa_end_catch 4819 /// call can be marked as nounwind even if EndMightThrow is true. 4820 /// 4821 /// \param EndMightThrow - true if __cxa_end_catch might throw 4822 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, 4823 llvm::Value *Exn, 4824 bool EndMightThrow) { 4825 llvm::CallInst *call = 4826 CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); 4827 4828 CGF.EHStack.pushCleanup<CallEndCatch>( 4829 NormalAndEHCleanup, 4830 EndMightThrow && !CGF.CGM.getLangOpts().AssumeNothrowExceptionDtor); 4831 4832 return call; 4833 } 4834 4835 /// A "special initializer" callback for initializing a catch 4836 /// parameter during catch initialization. 4837 static void InitCatchParam(CodeGenFunction &CGF, 4838 const VarDecl &CatchParam, 4839 Address ParamAddr, 4840 SourceLocation Loc) { 4841 // Load the exception from where the landing pad saved it. 4842 llvm::Value *Exn = CGF.getExceptionFromSlot(); 4843 4844 CanQualType CatchType = 4845 CGF.CGM.getContext().getCanonicalType(CatchParam.getType()); 4846 llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType); 4847 4848 // If we're catching by reference, we can just cast the object 4849 // pointer to the appropriate pointer. 4850 if (isa<ReferenceType>(CatchType)) { 4851 QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType(); 4852 bool EndCatchMightThrow = CaughtType->isRecordType(); 4853 4854 // __cxa_begin_catch returns the adjusted object pointer. 4855 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow); 4856 4857 // We have no way to tell the personality function that we're 4858 // catching by reference, so if we're catching a pointer, 4859 // __cxa_begin_catch will actually return that pointer by value. 4860 if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) { 4861 QualType PointeeType = PT->getPointeeType(); 4862 4863 // When catching by reference, generally we should just ignore 4864 // this by-value pointer and use the exception object instead. 4865 if (!PointeeType->isRecordType()) { 4866 4867 // Exn points to the struct _Unwind_Exception header, which 4868 // we have to skip past in order to reach the exception data. 4869 unsigned HeaderSize = 4870 CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException(); 4871 AdjustedExn = 4872 CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize); 4873 4874 // However, if we're catching a pointer-to-record type that won't 4875 // work, because the personality function might have adjusted 4876 // the pointer. There's actually no way for us to fully satisfy 4877 // the language/ABI contract here: we can't use Exn because it 4878 // might have the wrong adjustment, but we can't use the by-value 4879 // pointer because it's off by a level of abstraction. 4880 // 4881 // The current solution is to dump the adjusted pointer into an 4882 // alloca, which breaks language semantics (because changing the 4883 // pointer doesn't change the exception) but at least works. 4884 // The better solution would be to filter out non-exact matches 4885 // and rethrow them, but this is tricky because the rethrow 4886 // really needs to be catchable by other sites at this landing 4887 // pad. The best solution is to fix the personality function. 4888 } else { 4889 // Pull the pointer for the reference type off. 
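        // Illustrative case: for 'catch (S *&p)' where S is a class type,
        // CaughtType is 'S *'; the temporary created below holds the
        // (possibly personality-adjusted) S* returned by __cxa_begin_catch,
        // and the reference is bound to that temporary.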
4890 llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType); 4891 4892 // Create the temporary and write the adjusted pointer into it. 4893 Address ExnPtrTmp = 4894 CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp"); 4895 llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); 4896 CGF.Builder.CreateStore(Casted, ExnPtrTmp); 4897 4898 // Bind the reference to the temporary. 4899 AdjustedExn = ExnPtrTmp.emitRawPointer(CGF); 4900 } 4901 } 4902 4903 llvm::Value *ExnCast = 4904 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref"); 4905 CGF.Builder.CreateStore(ExnCast, ParamAddr); 4906 return; 4907 } 4908 4909 // Scalars and complexes. 4910 TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); 4911 if (TEK != TEK_Aggregate) { 4912 llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false); 4913 4914 // If the catch type is a pointer type, __cxa_begin_catch returns 4915 // the pointer by value. 4916 if (CatchType->hasPointerRepresentation()) { 4917 llvm::Value *CastExn = 4918 CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted"); 4919 4920 switch (CatchType.getQualifiers().getObjCLifetime()) { 4921 case Qualifiers::OCL_Strong: 4922 CastExn = CGF.EmitARCRetainNonBlock(CastExn); 4923 [[fallthrough]]; 4924 4925 case Qualifiers::OCL_None: 4926 case Qualifiers::OCL_ExplicitNone: 4927 case Qualifiers::OCL_Autoreleasing: 4928 CGF.Builder.CreateStore(CastExn, ParamAddr); 4929 return; 4930 4931 case Qualifiers::OCL_Weak: 4932 CGF.EmitARCInitWeak(ParamAddr, CastExn); 4933 return; 4934 } 4935 llvm_unreachable("bad ownership qualifier!"); 4936 } 4937 4938 // Otherwise, it returns a pointer into the exception object. 4939 4940 LValue srcLV = CGF.MakeNaturalAlignAddrLValue(AdjustedExn, CatchType); 4941 LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType); 4942 switch (TEK) { 4943 case TEK_Complex: 4944 CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV, 4945 /*init*/ true); 4946 return; 4947 case TEK_Scalar: { 4948 llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc); 4949 CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true); 4950 return; 4951 } 4952 case TEK_Aggregate: 4953 llvm_unreachable("evaluation kind filtered out!"); 4954 } 4955 llvm_unreachable("bad evaluation kind"); 4956 } 4957 4958 assert(isa<RecordType>(CatchType) && "unexpected catch type!"); 4959 auto catchRD = CatchType->getAsCXXRecordDecl(); 4960 CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD); 4961 4962 llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok 4963 4964 // Check for a copy expression. If we don't have a copy expression, 4965 // that means a trivial copy is okay. 4966 const Expr *copyExpr = CatchParam.getInit(); 4967 if (!copyExpr) { 4968 llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true); 4969 Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy), 4970 LLVMCatchTy, caughtExnAlignment); 4971 LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType); 4972 LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType); 4973 CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap); 4974 return; 4975 } 4976 4977 // We have to call __cxa_get_exception_ptr to get the adjusted 4978 // pointer before copying. 4979 llvm::CallInst *rawAdjustedExn = 4980 CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn); 4981 4982 // Cast that to the appropriate type. 
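  // A note on ordering (sketch): the copy construction below runs inside a
  // terminate scope, so a throwing copy constructor terminates the program
  // instead of unwinding through the in-flight exception; __cxa_begin_catch
  // is only called once the copy has succeeded.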
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}

/// Begins a catch statement by initializing the catch variable and
/// calling __cxa_begin_catch.
void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                   const CXXCatchStmt *S) {
  // We have to be very careful with the ordering of cleanups here:
  //   C++ [except.throw]p4:
  //     The destruction [of the exception temporary] occurs
  //     immediately after the destruction of the object declared in
  //     the exception-declaration in the handler.
  //
  // So the precise ordering is:
  //   1. Construct catch variable.
  //   2. __cxa_begin_catch
  //   3. Enter __cxa_end_catch cleanup
  //   4. Enter dtor cleanup
  //
  // We do this by using a slightly abnormal initialization process.
  // Delegation sequence:
  //   - ExitCXXTryStmt opens a RunCleanupsScope
  //   - EmitAutoVarAlloca creates the variable and debug info
  //   - InitCatchParam initializes the variable from the exception
  //   - CallBeginCatch calls __cxa_begin_catch
  //   - CallBeginCatch enters the __cxa_end_catch cleanup
  //   - EmitAutoVarCleanups enters the variable destructor cleanup
  //   - EmitCXXTryStmt emits the code for the catch body
  //   - EmitCXXTryStmt closes the RunCleanupsScope

  VarDecl *CatchParam = S->getExceptionDecl();
  if (!CatchParam) {
    llvm::Value *Exn = CGF.getExceptionFromSlot();
    CallBeginCatch(CGF, Exn, true);
    return;
  }

  // Emit the local.
  CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
  InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
  CGF.EmitAutoVarCleanups(var);
}

/// Get or define the following function:
///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
/// This code is used only in C++.
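/// A sketch of the emitted body (when not already defined elsewhere):
///   __cxa_begin_catch(exn);
///   std::terminate();        // via CGM.getTerminateFn()
///   unreachable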
5056 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) { 5057 ASTContext &C = CGM.getContext(); 5058 const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration( 5059 C.VoidTy, {C.getPointerType(C.CharTy)}); 5060 llvm::FunctionType *fnTy = CGM.getTypes().GetFunctionType(FI); 5061 llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction( 5062 fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true); 5063 llvm::Function *fn = 5064 cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts()); 5065 if (fn->empty()) { 5066 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, fn, /*IsThunk=*/false); 5067 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, fn); 5068 fn->setDoesNotThrow(); 5069 fn->setDoesNotReturn(); 5070 5071 // What we really want is to massively penalize inlining without 5072 // forbidding it completely. The difference between that and 5073 // 'noinline' is negligible. 5074 fn->addFnAttr(llvm::Attribute::NoInline); 5075 5076 // Allow this function to be shared across translation units, but 5077 // we don't want it to turn into an exported symbol. 5078 fn->setLinkage(llvm::Function::LinkOnceODRLinkage); 5079 fn->setVisibility(llvm::Function::HiddenVisibility); 5080 if (CGM.supportsCOMDAT()) 5081 fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName())); 5082 5083 // Set up the function. 5084 llvm::BasicBlock *entry = 5085 llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn); 5086 CGBuilderTy builder(CGM, entry); 5087 5088 // Pull the exception pointer out of the parameter list. 5089 llvm::Value *exn = &*fn->arg_begin(); 5090 5091 // Call __cxa_begin_catch(exn). 5092 llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn); 5093 catchCall->setDoesNotThrow(); 5094 catchCall->setCallingConv(CGM.getRuntimeCC()); 5095 5096 // Call std::terminate(). 5097 llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn()); 5098 termCall->setDoesNotThrow(); 5099 termCall->setDoesNotReturn(); 5100 termCall->setCallingConv(CGM.getRuntimeCC()); 5101 5102 // std::terminate cannot return. 5103 builder.CreateUnreachable(); 5104 } 5105 return fnRef; 5106 } 5107 5108 llvm::CallInst * 5109 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF, 5110 llvm::Value *Exn) { 5111 // In C++, we want to call __cxa_begin_catch() before terminating. 
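  // Rationale (sketch): __cxa_begin_catch marks the exception as handled
  // first, which gives a std::set_terminate handler a chance to inspect the
  // active exception, e.g. via std::current_exception().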
  if (Exn) {
    assert(CGF.CGM.getLangOpts().CPlusPlus);
    return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
  }
  return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
}

std::pair<llvm::Value *, const CXXRecordDecl *>
ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
                             const CXXRecordDecl *RD) {
  return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}

llvm::Constant *
ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
  const CXXMethodDecl *origMD =
      cast<CXXMethodDecl>(CGM.getItaniumVTableContext()
                              .findOriginalMethod(MD->getCanonicalDecl())
                              .getDecl());
  llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD);
  QualType funcType = CGM.getContext().getMemberPointerType(
      MD->getType(), MD->getParent()->getTypeForDecl());
  return CGM.getMemberFunctionPointer(thunk, funcType);
}

void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
                                       const CXXCatchStmt *C) {
  if (CGF.getTarget().hasFeature("exception-handling"))
    CGF.EHStack.pushCleanup<CatchRetScope>(
        NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
  ItaniumCXXABI::emitBeginCatch(CGF, C);
}

llvm::CallInst *
WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
                                                       llvm::Value *Exn) {
  // The Itanium ABI calls __clang_call_terminate(), which calls
  // __cxa_begin_catch() on the violating exception to mark it handled.
  // Because that is currently hard to do with the wasm EH instruction
  // structure (catch/catch_all), we just call std::terminate and ignore the
  // violating exception, as in CGCXXABI.
  // TODO: Consider a code transformation that makes calling
  // __clang_call_terminate possible.
  return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
}

/// Register a global destructor as best as we know how.
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                  llvm::FunctionCallee Dtor,
                                  llvm::Constant *Addr) {
  if (D.getTLSKind() != VarDecl::TLS_None) {
    llvm::PointerType *PtrTy = CGF.UnqualPtrTy;

    // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
    llvm::FunctionType *AtExitTy =
        llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, PtrTy}, true);

    // Fetch the actual function.
    llvm::FunctionCallee AtExit =
        CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");

    // Create the __dtor function for the var decl.
    llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);

    // Register the __dtor above with __pt_atexit_np. The first parameter is
    // the flags word and must be 0; the second is the function pointer.
    llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
    CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});

    // A TLS __dtor cannot be unregistered, so we are done.
    return;
  }

  // Create the __dtor function for the var decl.
  llvm::Function *DtorStub =
      cast<llvm::Function>(CGF.createAtExitStub(D, Dtor, Addr));

  // Register the __dtor above with atexit().
  CGF.registerGlobalDtorWithAtExit(DtorStub);

  // Emit the __finalize function to unregister __dtor and (as appropriate)
  // call __dtor.
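  // Sketch of the resulting sterm finalizer (see emitCXXStermFinalizer):
  //   if (unatexit(__dtor) == 0)  // still registered, so not yet run
  //     __dtor();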
  emitCXXStermFinalizer(D, DtorStub, Addr);
}

void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList(), D.getLocation(),
                    D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // unatexit returns 0, meaning that the cleanup is still pending (and we
  // should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check whether unatexit returned 0. If it did, jump to DestructCallBlock;
  // otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             IPA->getPriority());
  } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
  }
}