xref: /freebsd-src/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp (revision 753f127f3ace09432b2baeffd71a308760641a62)
1 //===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This provides C++ code generation targeting the Itanium C++ ABI.  The class
10 // in this file generates structures that follow the Itanium C++ ABI, which is
11 // documented at:
12 //  https://itanium-cxx-abi.github.io/cxx-abi/abi.html
13 //  https://itanium-cxx-abi.github.io/cxx-abi/abi-eh.html
14 //
15 // It also supports the closely-related ARM ABI, documented at:
16 // https://developer.arm.com/documentation/ihi0041/g/
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "CGCXXABI.h"
21 #include "CGCleanup.h"
22 #include "CGRecordLayout.h"
23 #include "CGVTables.h"
24 #include "CodeGenFunction.h"
25 #include "CodeGenModule.h"
26 #include "TargetInfo.h"
27 #include "clang/AST/Attr.h"
28 #include "clang/AST/Mangle.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/Type.h"
31 #include "clang/CodeGen/ConstantInitBuilder.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/ScopedPrinter.h"
38 
39 using namespace clang;
40 using namespace CodeGen;
41 
42 namespace {
43 class ItaniumCXXABI : public CodeGen::CGCXXABI {
44   /// VTables - All the vtables which have been defined.
45   llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
46 
47   /// All the thread wrapper functions that have been used.
48   llvm::SmallVector<std::pair<const VarDecl *, llvm::Function *>, 8>
49       ThreadWrappers;
50 
51 protected:
52   bool UseARMMethodPtrABI;
53   bool UseARMGuardVarABI;
54   bool Use32BitVTableOffsetABI;
55 
56   ItaniumMangleContext &getMangleContext() {
57     return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
58   }
59 
60 public:
61   ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
62                 bool UseARMMethodPtrABI = false,
63                 bool UseARMGuardVarABI = false) :
64     CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
65     UseARMGuardVarABI(UseARMGuardVarABI),
66     Use32BitVTableOffsetABI(false) { }
67 
68   bool classifyReturnType(CGFunctionInfo &FI) const override;
69 
70   RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override {
71     // If C++ prohibits us from making a copy, pass by address.
72     if (!RD->canPassInRegisters())
73       return RAA_Indirect;
74     return RAA_Default;
75   }
76 
77   bool isThisCompleteObject(GlobalDecl GD) const override {
78     // The Itanium ABI has separate complete-object vs.  base-object
79     // variants of both constructors and destructors.
80     if (isa<CXXDestructorDecl>(GD.getDecl())) {
81       switch (GD.getDtorType()) {
82       case Dtor_Complete:
83       case Dtor_Deleting:
84         return true;
85 
86       case Dtor_Base:
87         return false;
88 
89       case Dtor_Comdat:
90         llvm_unreachable("emitting dtor comdat as function?");
91       }
92       llvm_unreachable("bad dtor kind");
93     }
94     if (isa<CXXConstructorDecl>(GD.getDecl())) {
95       switch (GD.getCtorType()) {
96       case Ctor_Complete:
97         return true;
98 
99       case Ctor_Base:
100         return false;
101 
102       case Ctor_CopyingClosure:
103       case Ctor_DefaultClosure:
104         llvm_unreachable("closure ctors in Itanium ABI?");
105 
106       case Ctor_Comdat:
107         llvm_unreachable("emitting ctor comdat as function?");
108       }
109       llvm_unreachable("bad dtor kind");
110     }
111 
112     // No other kinds.
113     return false;
114   }
115 
116   bool isZeroInitializable(const MemberPointerType *MPT) override;
117 
118   llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
119 
120   CGCallee
121     EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
122                                     const Expr *E,
123                                     Address This,
124                                     llvm::Value *&ThisPtrForCall,
125                                     llvm::Value *MemFnPtr,
126                                     const MemberPointerType *MPT) override;
127 
128   llvm::Value *
129     EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E,
130                                  Address Base,
131                                  llvm::Value *MemPtr,
132                                  const MemberPointerType *MPT) override;
133 
134   llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
135                                            const CastExpr *E,
136                                            llvm::Value *Src) override;
137   llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
138                                               llvm::Constant *Src) override;
139 
140   llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override;
141 
142   llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override;
143   llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
144                                         CharUnits offset) override;
145   llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override;
146   llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
147                                      CharUnits ThisAdjustment);
148 
149   llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
150                                            llvm::Value *L, llvm::Value *R,
151                                            const MemberPointerType *MPT,
152                                            bool Inequality) override;
153 
154   llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
155                                          llvm::Value *Addr,
156                                          const MemberPointerType *MPT) override;
157 
158   void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE,
159                                Address Ptr, QualType ElementType,
160                                const CXXDestructorDecl *Dtor) override;
161 
162   void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override;
163   void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override;
164 
165   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
166 
167   llvm::CallInst *
168   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
169                                       llvm::Value *Exn) override;
170 
171   void EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD);
172   llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
173   CatchTypeInfo
174   getAddrOfCXXCatchHandlerType(QualType Ty,
175                                QualType CatchHandlerType) override {
176     return CatchTypeInfo{getAddrOfRTTIDescriptor(Ty), 0};
177   }
178 
179   bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
180   void EmitBadTypeidCall(CodeGenFunction &CGF) override;
181   llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy,
182                           Address ThisPtr,
183                           llvm::Type *StdTypeInfoPtrTy) override;
184 
185   bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
186                                           QualType SrcRecordTy) override;
187 
188   llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, Address Value,
189                                    QualType SrcRecordTy, QualType DestTy,
190                                    QualType DestRecordTy,
191                                    llvm::BasicBlock *CastEnd) override;
192 
193   llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, Address Value,
194                                      QualType SrcRecordTy,
195                                      QualType DestTy) override;
196 
197   bool EmitBadCastCall(CodeGenFunction &CGF) override;
198 
199   llvm::Value *
200     GetVirtualBaseClassOffset(CodeGenFunction &CGF, Address This,
201                               const CXXRecordDecl *ClassDecl,
202                               const CXXRecordDecl *BaseClassDecl) override;
203 
204   void EmitCXXConstructors(const CXXConstructorDecl *D) override;
205 
206   AddedStructorArgCounts
207   buildStructorSignature(GlobalDecl GD,
208                          SmallVectorImpl<CanQualType> &ArgTys) override;
209 
210   bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
211                               CXXDtorType DT) const override {
212     // Itanium does not emit any destructor variant as an inline thunk.
213     // Delegating may occur as an optimization, but all variants are either
214     // emitted with external linkage or as linkonce if they are inline and used.
215     return false;
216   }
217 
218   void EmitCXXDestructors(const CXXDestructorDecl *D) override;
219 
220   void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy,
221                                  FunctionArgList &Params) override;
222 
223   void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override;
224 
225   AddedStructorArgs getImplicitConstructorArgs(CodeGenFunction &CGF,
226                                                const CXXConstructorDecl *D,
227                                                CXXCtorType Type,
228                                                bool ForVirtualBase,
229                                                bool Delegating) override;
230 
231   llvm::Value *getCXXDestructorImplicitParam(CodeGenFunction &CGF,
232                                              const CXXDestructorDecl *DD,
233                                              CXXDtorType Type,
234                                              bool ForVirtualBase,
235                                              bool Delegating) override;
236 
237   void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD,
238                           CXXDtorType Type, bool ForVirtualBase,
239                           bool Delegating, Address This,
240                           QualType ThisTy) override;
241 
242   void emitVTableDefinitions(CodeGenVTables &CGVT,
243                              const CXXRecordDecl *RD) override;
244 
245   bool isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
246                                            CodeGenFunction::VPtr Vptr) override;
247 
248   bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) override {
249     return true;
250   }
251 
252   llvm::Constant *
253   getVTableAddressPoint(BaseSubobject Base,
254                         const CXXRecordDecl *VTableClass) override;
255 
256   llvm::Value *getVTableAddressPointInStructor(
257       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
258       BaseSubobject Base, const CXXRecordDecl *NearestVBase) override;
259 
260   llvm::Value *getVTableAddressPointInStructorWithVTT(
261       CodeGenFunction &CGF, const CXXRecordDecl *VTableClass,
262       BaseSubobject Base, const CXXRecordDecl *NearestVBase);
263 
264   llvm::Constant *
265   getVTableAddressPointForConstExpr(BaseSubobject Base,
266                                     const CXXRecordDecl *VTableClass) override;
267 
268   llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
269                                         CharUnits VPtrOffset) override;
270 
271   CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
272                                      Address This, llvm::Type *Ty,
273                                      SourceLocation Loc) override;
274 
275   llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
276                                          const CXXDestructorDecl *Dtor,
277                                          CXXDtorType DtorType, Address This,
278                                          DeleteOrMemberCallExpr E) override;
279 
280   void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override;
281 
282   bool canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const override;
283   bool canSpeculativelyEmitVTableAsBaseClass(const CXXRecordDecl *RD) const;
284 
285   void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD,
286                        bool ReturnAdjustment) override {
287     // Allow inlining of thunks by emitting them with available_externally
288     // linkage together with vtables when needed.
289     if (ForVTable && !Thunk->hasLocalLinkage())
290       Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
291     CGM.setGVProperties(Thunk, GD);
292   }
293 
294   bool exportThunk() override { return true; }
295 
296   llvm::Value *performThisAdjustment(CodeGenFunction &CGF, Address This,
297                                      const ThisAdjustment &TA) override;
298 
299   llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
300                                        const ReturnAdjustment &RA) override;
301 
302   size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
303                               FunctionArgList &Args) const override {
304     assert(!Args.empty() && "expected the arglist to not be empty!");
305     return Args.size() - 1;
306   }
307 
308   StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; }
309   StringRef GetDeletedVirtualCallName() override
310     { return "__cxa_deleted_virtual"; }
311 
312   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
313   Address InitializeArrayCookie(CodeGenFunction &CGF,
314                                 Address NewPtr,
315                                 llvm::Value *NumElements,
316                                 const CXXNewExpr *expr,
317                                 QualType ElementType) override;
318   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF,
319                                    Address allocPtr,
320                                    CharUnits cookieSize) override;
321 
322   void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
323                        llvm::GlobalVariable *DeclPtr,
324                        bool PerformInit) override;
325   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
326                           llvm::FunctionCallee dtor,
327                           llvm::Constant *addr) override;
328 
329   llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD,
330                                                 llvm::Value *Val);
331   void EmitThreadLocalInitFuncs(
332       CodeGenModule &CGM,
333       ArrayRef<const VarDecl *> CXXThreadLocals,
334       ArrayRef<llvm::Function *> CXXThreadLocalInits,
335       ArrayRef<const VarDecl *> CXXThreadLocalInitVars) override;
336 
337   bool usesThreadWrapperFunction(const VarDecl *VD) const override {
338     return !isEmittedWithConstantInitializer(VD) ||
339            mayNeedDestruction(VD);
340   }
341   LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD,
342                                       QualType LValType) override;
343 
344   bool NeedsVTTParameter(GlobalDecl GD) override;
345 
346   /**************************** RTTI Uniqueness ******************************/
347 
348 protected:
349   /// Returns true if the ABI requires RTTI type_info objects to be unique
350   /// across a program.
351   virtual bool shouldRTTIBeUnique() const { return true; }
352 
353 public:
354   /// What sort of unique-RTTI behavior should we use?
355   enum RTTIUniquenessKind {
356     /// We are guaranteeing, or need to guarantee, that the RTTI string
357     /// is unique.
358     RUK_Unique,
359 
360     /// We are not guaranteeing uniqueness for the RTTI string, so we
361     /// can demote to hidden visibility but must use string comparisons.
362     RUK_NonUniqueHidden,
363 
364     /// We are not guaranteeing uniqueness for the RTTI string, so we
365     /// have to use string comparisons, but we also have to emit it with
366     /// non-hidden visibility.
367     RUK_NonUniqueVisible
368   };
369 
370   /// Return the required visibility status for the given type and linkage in
371   /// the current ABI.
372   RTTIUniquenessKind
373   classifyRTTIUniqueness(QualType CanTy,
374                          llvm::GlobalValue::LinkageTypes Linkage) const;
375   friend class ItaniumRTTIBuilder;
376 
377   void emitCXXStructor(GlobalDecl GD) override;
378 
379   std::pair<llvm::Value *, const CXXRecordDecl *>
380   LoadVTablePtr(CodeGenFunction &CGF, Address This,
381                 const CXXRecordDecl *RD) override;
382 
383  private:
384    bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const {
385      const auto &VtableLayout =
386          CGM.getItaniumVTableContext().getVTableLayout(RD);
387 
388      for (const auto &VtableComponent : VtableLayout.vtable_components()) {
389        // Skip empty slot.
390        if (!VtableComponent.isUsedFunctionPointerKind())
391          continue;
392 
393        const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
394        if (!Method->getCanonicalDecl()->isInlined())
395          continue;
396 
397        StringRef Name = CGM.getMangledName(VtableComponent.getGlobalDecl());
398        auto *Entry = CGM.GetGlobalValue(Name);
399        // This checks if virtual inline function has already been emitted.
400        // Note that it is possible that this inline function would be emitted
401        // after trying to emit vtable speculatively. Because of this we do
402        // an extra pass after emitting all deferred vtables to find and emit
403        // these vtables opportunistically.
404        if (!Entry || Entry->isDeclaration())
405          return true;
406      }
407      return false;
408   }
409 
410   bool isVTableHidden(const CXXRecordDecl *RD) const {
411     const auto &VtableLayout =
412             CGM.getItaniumVTableContext().getVTableLayout(RD);
413 
414     for (const auto &VtableComponent : VtableLayout.vtable_components()) {
415       if (VtableComponent.isRTTIKind()) {
416         const CXXRecordDecl *RTTIDecl = VtableComponent.getRTTIDecl();
417         if (RTTIDecl->getVisibility() == Visibility::HiddenVisibility)
418           return true;
419       } else if (VtableComponent.isUsedFunctionPointerKind()) {
420         const CXXMethodDecl *Method = VtableComponent.getFunctionDecl();
421         if (Method->getVisibility() == Visibility::HiddenVisibility &&
422             !Method->isDefined())
423           return true;
424       }
425     }
426     return false;
427   }
428 };
429 
430 class ARMCXXABI : public ItaniumCXXABI {
431 public:
432   ARMCXXABI(CodeGen::CodeGenModule &CGM) :
433     ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
434                   /*UseARMGuardVarABI=*/true) {}
435 
436   bool HasThisReturn(GlobalDecl GD) const override {
437     return (isa<CXXConstructorDecl>(GD.getDecl()) || (
438               isa<CXXDestructorDecl>(GD.getDecl()) &&
439               GD.getDtorType() != Dtor_Deleting));
440   }
441 
442   void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV,
443                            QualType ResTy) override;
444 
445   CharUnits getArrayCookieSizeImpl(QualType elementType) override;
446   Address InitializeArrayCookie(CodeGenFunction &CGF,
447                                 Address NewPtr,
448                                 llvm::Value *NumElements,
449                                 const CXXNewExpr *expr,
450                                 QualType ElementType) override;
451   llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, Address allocPtr,
452                                    CharUnits cookieSize) override;
453 };
454 
455 class AppleARM64CXXABI : public ARMCXXABI {
456 public:
457   AppleARM64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
458     Use32BitVTableOffsetABI = true;
459   }
460 
461   // ARM64 libraries are prepared for non-unique RTTI.
462   bool shouldRTTIBeUnique() const override { return false; }
463 };
464 
465 class FuchsiaCXXABI final : public ItaniumCXXABI {
466 public:
467   explicit FuchsiaCXXABI(CodeGen::CodeGenModule &CGM)
468       : ItaniumCXXABI(CGM) {}
469 
470 private:
471   bool HasThisReturn(GlobalDecl GD) const override {
472     return isa<CXXConstructorDecl>(GD.getDecl()) ||
473            (isa<CXXDestructorDecl>(GD.getDecl()) &&
474             GD.getDtorType() != Dtor_Deleting);
475   }
476 };
477 
478 class WebAssemblyCXXABI final : public ItaniumCXXABI {
479 public:
480   explicit WebAssemblyCXXABI(CodeGen::CodeGenModule &CGM)
481       : ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
482                       /*UseARMGuardVarABI=*/true) {}
483   void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
484   llvm::CallInst *
485   emitTerminateForUnexpectedException(CodeGenFunction &CGF,
486                                       llvm::Value *Exn) override;
487 
488 private:
489   bool HasThisReturn(GlobalDecl GD) const override {
490     return isa<CXXConstructorDecl>(GD.getDecl()) ||
491            (isa<CXXDestructorDecl>(GD.getDecl()) &&
492             GD.getDtorType() != Dtor_Deleting);
493   }
494   bool canCallMismatchedFunctionType() const override { return false; }
495 };
496 
497 class XLCXXABI final : public ItaniumCXXABI {
498 public:
499   explicit XLCXXABI(CodeGen::CodeGenModule &CGM)
500       : ItaniumCXXABI(CGM) {}
501 
502   void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
503                           llvm::FunctionCallee dtor,
504                           llvm::Constant *addr) override;
505 
506   bool useSinitAndSterm() const override { return true; }
507 
508 private:
509   void emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
510                              llvm::Constant *addr);
511 };
512 }
513 
514 CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
515   switch (CGM.getContext().getCXXABIKind()) {
516   // For IR-generation purposes, there's no significant difference
517   // between the ARM and iOS ABIs.
518   case TargetCXXABI::GenericARM:
519   case TargetCXXABI::iOS:
520   case TargetCXXABI::WatchOS:
521     return new ARMCXXABI(CGM);
522 
523   case TargetCXXABI::AppleARM64:
524     return new AppleARM64CXXABI(CGM);
525 
526   case TargetCXXABI::Fuchsia:
527     return new FuchsiaCXXABI(CGM);
528 
529   // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't
530   // include the other 32-bit ARM oddities: constructor/destructor return values
531   // and array cookies.
532   case TargetCXXABI::GenericAArch64:
533     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
534                              /*UseARMGuardVarABI=*/true);
535 
536   case TargetCXXABI::GenericMIPS:
537     return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
538 
539   case TargetCXXABI::WebAssembly:
540     return new WebAssemblyCXXABI(CGM);
541 
542   case TargetCXXABI::XL:
543     return new XLCXXABI(CGM);
544 
545   case TargetCXXABI::GenericItanium:
546     if (CGM.getContext().getTargetInfo().getTriple().getArch()
547         == llvm::Triple::le32) {
548       // For PNaCl, use ARM-style method pointers so that PNaCl code
549       // does not assume anything about the alignment of function
550       // pointers.
551       return new ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true);
552     }
553     return new ItaniumCXXABI(CGM);
554 
555   case TargetCXXABI::Microsoft:
556     llvm_unreachable("Microsoft ABI is not Itanium-based");
557   }
558   llvm_unreachable("bad ABI kind");
559 }
560 
561 llvm::Type *
562 ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
563   if (MPT->isMemberDataPointer())
564     return CGM.PtrDiffTy;
565   return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy);
566 }
567 
568 /// In the Itanium and ARM ABIs, method pointers have the form:
569 ///   struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
570 ///
571 /// In the Itanium ABI:
572 ///  - method pointers are virtual if (memptr.ptr & 1) is nonzero
573 ///  - the this-adjustment is (memptr.adj)
574 ///  - the virtual offset is (memptr.ptr - 1)
575 ///
576 /// In the ARM ABI:
577 ///  - method pointers are virtual if (memptr.adj & 1) is nonzero
578 ///  - the this-adjustment is (memptr.adj >> 1)
579 ///  - the virtual offset is (memptr.ptr)
580 /// ARM uses 'adj' for the virtual flag because Thumb functions
581 /// may be only single-byte aligned.
582 ///
583 /// If the member is virtual, the adjusted 'this' pointer points
584 /// to a vtable pointer from which the virtual offset is applied.
585 ///
586 /// If the member is non-virtual, memptr.ptr is the address of
587 /// the function to call.
588 CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
589     CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
590     llvm::Value *&ThisPtrForCall,
591     llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
592   CGBuilderTy &Builder = CGF.Builder;
593 
594   const FunctionProtoType *FPT =
595     MPT->getPointeeType()->getAs<FunctionProtoType>();
596   auto *RD =
597       cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
598 
599   llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
600       CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
601 
602   llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1);
603 
604   llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
605   llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
606   llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
607 
608   // Extract memptr.adj, which is in the second field.
609   llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
610 
611   // Compute the true adjustment.
612   llvm::Value *Adj = RawAdj;
613   if (UseARMMethodPtrABI)
614     Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
615 
616   // Apply the adjustment and cast back to the original struct type
617   // for consistency.
618   llvm::Value *This = ThisAddr.getPointer();
619   llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
620   Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
621   This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
622   ThisPtrForCall = This;
623 
624   // Load the function pointer.
625   llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
626 
627   // If the LSB in the function pointer is 1, the function pointer points to
628   // a virtual function.
629   llvm::Value *IsVirtual;
630   if (UseARMMethodPtrABI)
631     IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
632   else
633     IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
634   IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
635   Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
636 
637   // In the virtual path, the adjustment left 'This' pointing to the
638   // vtable of the correct base subobject.  The "function pointer" is an
639   // offset within the vtable (+1 for the virtual flag on non-ARM).
640   CGF.EmitBlock(FnVirtual);
641 
642   // Cast the adjusted this to a pointer to vtable pointer and load.
643   llvm::Type *VTableTy = Builder.getInt8PtrTy();
644   CharUnits VTablePtrAlign =
645     CGF.CGM.getDynamicOffsetAlignment(ThisAddr.getAlignment(), RD,
646                                       CGF.getPointerAlign());
647   llvm::Value *VTable = CGF.GetVTablePtr(
648       Address(This, ThisAddr.getElementType(), VTablePtrAlign), VTableTy, RD);
649 
650   // Apply the offset.
651   // On ARM64, to reserve extra space in virtual member function pointers,
652   // we only pay attention to the low 32 bits of the offset.
653   llvm::Value *VTableOffset = FnAsInt;
654   if (!UseARMMethodPtrABI)
655     VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
656   if (Use32BitVTableOffsetABI) {
657     VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
658     VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
659   }
660 
661   // Check the address of the function pointer if CFI on member function
662   // pointers is enabled.
663   llvm::Constant *CheckSourceLocation;
664   llvm::Constant *CheckTypeDesc;
665   bool ShouldEmitCFICheck = CGF.SanOpts.has(SanitizerKind::CFIMFCall) &&
666                             CGM.HasHiddenLTOVisibility(RD);
667   bool ShouldEmitVFEInfo = CGM.getCodeGenOpts().VirtualFunctionElimination &&
668                            CGM.HasHiddenLTOVisibility(RD);
669   bool ShouldEmitWPDInfo =
670       CGM.getCodeGenOpts().WholeProgramVTables &&
671       // Don't insert type tests if we are forcing public visibility.
672       !CGM.AlwaysHasLTOVisibilityPublic(RD);
673   llvm::Value *VirtualFn = nullptr;
674 
675   {
676     CodeGenFunction::SanitizerScope SanScope(&CGF);
677     llvm::Value *TypeId = nullptr;
678     llvm::Value *CheckResult = nullptr;
679 
680     if (ShouldEmitCFICheck || ShouldEmitVFEInfo || ShouldEmitWPDInfo) {
681       // If doing CFI, VFE or WPD, we will need the metadata node to check
682       // against.
683       llvm::Metadata *MD =
684           CGM.CreateMetadataIdentifierForVirtualMemPtrType(QualType(MPT, 0));
685       TypeId = llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
686     }
687 
688     if (ShouldEmitVFEInfo) {
689       llvm::Value *VFPAddr =
690           Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
691 
692       // If doing VFE, load from the vtable with a type.checked.load intrinsic
693       // call. Note that we use the GEP to calculate the address to load from
694       // and pass 0 as the offset to the intrinsic. This is because every
695       // vtable slot of the correct type is marked with matching metadata, and
696       // we know that the load must be from one of these slots.
697       llvm::Value *CheckedLoad = Builder.CreateCall(
698           CGM.getIntrinsic(llvm::Intrinsic::type_checked_load),
699           {VFPAddr, llvm::ConstantInt::get(CGM.Int32Ty, 0), TypeId});
700       CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
701       VirtualFn = Builder.CreateExtractValue(CheckedLoad, 0);
702       VirtualFn = Builder.CreateBitCast(VirtualFn, FTy->getPointerTo(),
703                                         "memptr.virtualfn");
704     } else {
705       // When not doing VFE, emit a normal load, as it allows more
706       // optimisations than type.checked.load.
707       if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
708         llvm::Value *VFPAddr =
709             Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
710         CheckResult = Builder.CreateCall(
711             CGM.getIntrinsic(llvm::Intrinsic::type_test),
712             {Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
713       }
714 
715       if (CGM.getItaniumVTableContext().isRelativeLayout()) {
716         VirtualFn = CGF.Builder.CreateCall(
717             CGM.getIntrinsic(llvm::Intrinsic::load_relative,
718                              {VTableOffset->getType()}),
719             {VTable, VTableOffset});
720         VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
721       } else {
722         llvm::Value *VFPAddr =
723             CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
724         VFPAddr = CGF.Builder.CreateBitCast(
725             VFPAddr, FTy->getPointerTo()->getPointerTo());
726         VirtualFn = CGF.Builder.CreateAlignedLoad(
727             FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
728             "memptr.virtualfn");
729       }
730     }
731     assert(VirtualFn && "Virtual fuction pointer not created!");
732     assert((!ShouldEmitCFICheck || !ShouldEmitVFEInfo || !ShouldEmitWPDInfo ||
733             CheckResult) &&
734            "Check result required but not created!");
735 
736     if (ShouldEmitCFICheck) {
737       // If doing CFI, emit the check.
738       CheckSourceLocation = CGF.EmitCheckSourceLocation(E->getBeginLoc());
739       CheckTypeDesc = CGF.EmitCheckTypeDescriptor(QualType(MPT, 0));
740       llvm::Constant *StaticData[] = {
741           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_VMFCall),
742           CheckSourceLocation,
743           CheckTypeDesc,
744       };
745 
746       if (CGM.getCodeGenOpts().SanitizeTrap.has(SanitizerKind::CFIMFCall)) {
747         CGF.EmitTrapCheck(CheckResult, SanitizerHandler::CFICheckFail);
748       } else {
749         llvm::Value *AllVtables = llvm::MetadataAsValue::get(
750             CGM.getLLVMContext(),
751             llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
752         llvm::Value *ValidVtable = Builder.CreateCall(
753             CGM.getIntrinsic(llvm::Intrinsic::type_test), {VTable, AllVtables});
754         CGF.EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIMFCall),
755                       SanitizerHandler::CFICheckFail, StaticData,
756                       {VTable, ValidVtable});
757       }
758 
759       FnVirtual = Builder.GetInsertBlock();
760     }
761   } // End of sanitizer scope
762 
763   CGF.EmitBranch(FnEnd);
764 
765   // In the non-virtual path, the function pointer is actually a
766   // function pointer.
767   CGF.EmitBlock(FnNonVirtual);
768   llvm::Value *NonVirtualFn =
769     Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
770 
771   // Check the function pointer if CFI on member function pointers is enabled.
772   if (ShouldEmitCFICheck) {
773     CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
774     if (RD->hasDefinition()) {
775       CodeGenFunction::SanitizerScope SanScope(&CGF);
776 
777       llvm::Constant *StaticData[] = {
778           llvm::ConstantInt::get(CGF.Int8Ty, CodeGenFunction::CFITCK_NVMFCall),
779           CheckSourceLocation,
780           CheckTypeDesc,
781       };
782 
783       llvm::Value *Bit = Builder.getFalse();
784       llvm::Value *CastedNonVirtualFn =
785           Builder.CreateBitCast(NonVirtualFn, CGF.Int8PtrTy);
786       for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
787         llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
788             getContext().getMemberPointerType(
789                 MPT->getPointeeType(),
790                 getContext().getRecordType(Base).getTypePtr()));
791         llvm::Value *TypeId =
792             llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
793 
794         llvm::Value *TypeTest =
795             Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
796                                {CastedNonVirtualFn, TypeId});
797         Bit = Builder.CreateOr(Bit, TypeTest);
798       }
799 
800       CGF.EmitCheck(std::make_pair(Bit, SanitizerKind::CFIMFCall),
801                     SanitizerHandler::CFICheckFail, StaticData,
802                     {CastedNonVirtualFn, llvm::UndefValue::get(CGF.IntPtrTy)});
803 
804       FnNonVirtual = Builder.GetInsertBlock();
805     }
806   }
807 
808   // We're done.
809   CGF.EmitBlock(FnEnd);
810   llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
811   CalleePtr->addIncoming(VirtualFn, FnVirtual);
812   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
813 
814   CGCallee Callee(FPT, CalleePtr);
815   return Callee;
816 }
817 
818 /// Compute an l-value by applying the given pointer-to-member to a
819 /// base object.
820 llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
821     CodeGenFunction &CGF, const Expr *E, Address Base, llvm::Value *MemPtr,
822     const MemberPointerType *MPT) {
823   assert(MemPtr->getType() == CGM.PtrDiffTy);
824 
825   CGBuilderTy &Builder = CGF.Builder;
826 
827   // Cast to char*.
828   Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
829 
830   // Apply the offset, which we assume is non-null.
831   llvm::Value *Addr = Builder.CreateInBoundsGEP(
832       Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
833 
834   // Cast the address to the appropriate pointer type, adopting the
835   // address space of the base pointer.
836   llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())
837                             ->getPointerTo(Base.getAddressSpace());
838   return Builder.CreateBitCast(Addr, PType);
839 }
840 
841 /// Perform a bitcast, derived-to-base, or base-to-derived member pointer
842 /// conversion.
843 ///
844 /// Bitcast conversions are always a no-op under Itanium.
845 ///
846 /// Obligatory offset/adjustment diagram:
847 ///         <-- offset -->          <-- adjustment -->
848 ///   |--------------------------|----------------------|--------------------|
849 ///   ^Derived address point     ^Base address point    ^Member address point
850 ///
851 /// So when converting a base member pointer to a derived member pointer,
852 /// we add the offset to the adjustment because the address point has
853 /// decreased;  and conversely, when converting a derived MP to a base MP
854 /// we subtract the offset from the adjustment because the address point
855 /// has increased.
856 ///
857 /// The standard forbids (at compile time) conversion to and from
858 /// virtual bases, which is why we don't have to consider them here.
859 ///
860 /// The standard forbids (at run time) casting a derived MP to a base
861 /// MP when the derived MP does not point to a member of the base.
862 /// This is why -1 is a reasonable choice for null data member
863 /// pointers.
864 llvm::Value *
865 ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
866                                            const CastExpr *E,
867                                            llvm::Value *src) {
868   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
869          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
870          E->getCastKind() == CK_ReinterpretMemberPointer);
871 
872   // Under Itanium, reinterprets don't require any additional processing.
873   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
874 
875   // Use constant emission if we can.
876   if (isa<llvm::Constant>(src))
877     return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
878 
879   llvm::Constant *adj = getMemberPointerAdjustment(E);
880   if (!adj) return src;
881 
882   CGBuilderTy &Builder = CGF.Builder;
883   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
884 
885   const MemberPointerType *destTy =
886     E->getType()->castAs<MemberPointerType>();
887 
888   // For member data pointers, this is just a matter of adding the
889   // offset if the source is non-null.
890   if (destTy->isMemberDataPointer()) {
891     llvm::Value *dst;
892     if (isDerivedToBase)
893       dst = Builder.CreateNSWSub(src, adj, "adj");
894     else
895       dst = Builder.CreateNSWAdd(src, adj, "adj");
896 
897     // Null check.
898     llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
899     llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
900     return Builder.CreateSelect(isNull, src, dst);
901   }
902 
903   // The this-adjustment is left-shifted by 1 on ARM.
904   if (UseARMMethodPtrABI) {
905     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
906     offset <<= 1;
907     adj = llvm::ConstantInt::get(adj->getType(), offset);
908   }
909 
910   llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
911   llvm::Value *dstAdj;
912   if (isDerivedToBase)
913     dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
914   else
915     dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
916 
917   return Builder.CreateInsertValue(src, dstAdj, 1);
918 }
919 
920 llvm::Constant *
921 ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
922                                            llvm::Constant *src) {
923   assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
924          E->getCastKind() == CK_BaseToDerivedMemberPointer ||
925          E->getCastKind() == CK_ReinterpretMemberPointer);
926 
927   // Under Itanium, reinterprets don't require any additional processing.
928   if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
929 
930   // If the adjustment is trivial, we don't need to do anything.
931   llvm::Constant *adj = getMemberPointerAdjustment(E);
932   if (!adj) return src;
933 
934   bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
935 
936   const MemberPointerType *destTy =
937     E->getType()->castAs<MemberPointerType>();
938 
939   // For member data pointers, this is just a matter of adding the
940   // offset if the source is non-null.
941   if (destTy->isMemberDataPointer()) {
942     // null maps to null.
943     if (src->isAllOnesValue()) return src;
944 
945     if (isDerivedToBase)
946       return llvm::ConstantExpr::getNSWSub(src, adj);
947     else
948       return llvm::ConstantExpr::getNSWAdd(src, adj);
949   }
950 
951   // The this-adjustment is left-shifted by 1 on ARM.
952   if (UseARMMethodPtrABI) {
953     uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
954     offset <<= 1;
955     adj = llvm::ConstantInt::get(adj->getType(), offset);
956   }
957 
958   llvm::Constant *srcAdj = src->getAggregateElement(1);
959   llvm::Constant *dstAdj;
960   if (isDerivedToBase)
961     dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
962   else
963     dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
964 
965   llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
966   assert(res != nullptr && "Folding must succeed");
967   return res;
968 }
969 
970 llvm::Constant *
971 ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
972   // Itanium C++ ABI 2.3:
973   //   A NULL pointer is represented as -1.
974   if (MPT->isMemberDataPointer())
975     return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true);
976 
977   llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0);
978   llvm::Constant *Values[2] = { Zero, Zero };
979   return llvm::ConstantStruct::getAnon(Values);
980 }
981 
982 llvm::Constant *
983 ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
984                                      CharUnits offset) {
985   // Itanium C++ ABI 2.3:
986   //   A pointer to data member is an offset from the base address of
987   //   the class object containing it, represented as a ptrdiff_t
988   return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity());
989 }
990 
llvm::Constant *
ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
  // Taking the address of a member function directly involves no
  // derived-to-base path, so the this-adjustment is zero.
  return BuildMemberPointer(MD, CharUnits::Zero());
}
995 
996 llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
997                                                   CharUnits ThisAdjustment) {
998   assert(MD->isInstance() && "Member function must not be static!");
999 
1000   CodeGenTypes &Types = CGM.getTypes();
1001 
1002   // Get the function pointer (or index if this is a virtual function).
1003   llvm::Constant *MemPtr[2];
1004   if (MD->isVirtual()) {
1005     uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);
1006     uint64_t VTableOffset;
1007     if (CGM.getItaniumVTableContext().isRelativeLayout()) {
1008       // Multiply by 4-byte relative offsets.
1009       VTableOffset = Index * 4;
1010     } else {
1011       const ASTContext &Context = getContext();
1012       CharUnits PointerWidth = Context.toCharUnitsFromBits(
1013           Context.getTargetInfo().getPointerWidth(0));
1014       VTableOffset = Index * PointerWidth.getQuantity();
1015     }
1016 
1017     if (UseARMMethodPtrABI) {
1018       // ARM C++ ABI 3.2.1:
1019       //   This ABI specifies that adj contains twice the this
1020       //   adjustment, plus 1 if the member function is virtual. The
1021       //   least significant bit of adj then makes exactly the same
1022       //   discrimination as the least significant bit of ptr does for
1023       //   Itanium.
1024       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
1025       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1026                                          2 * ThisAdjustment.getQuantity() + 1);
1027     } else {
1028       // Itanium C++ ABI 2.3:
1029       //   For a virtual function, [the pointer field] is 1 plus the
1030       //   virtual table offset (in bytes) of the function,
1031       //   represented as a ptrdiff_t.
1032       MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
1033       MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1034                                          ThisAdjustment.getQuantity());
1035     }
1036   } else {
1037     const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1038     llvm::Type *Ty;
1039     // Check whether the function has a computable LLVM signature.
1040     if (Types.isFuncTypeConvertible(FPT)) {
1041       // The function has a computable LLVM signature; use the correct type.
1042       Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
1043     } else {
1044       // Use an arbitrary non-function type to tell GetAddrOfFunction that the
1045       // function type is incomplete.
1046       Ty = CGM.PtrDiffTy;
1047     }
1048     llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
1049 
1050     MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
1051     MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
1052                                        (UseARMMethodPtrABI ? 2 : 1) *
1053                                        ThisAdjustment.getQuantity());
1054   }
1055 
1056   return llvm::ConstantStruct::getAnon(MemPtr);
1057 }
1058 
1059 llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
1060                                                  QualType MPType) {
1061   const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
1062   const ValueDecl *MPD = MP.getMemberPointerDecl();
1063   if (!MPD)
1064     return EmitNullMemberPointer(MPT);
1065 
1066   CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP);
1067 
1068   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
1069     return BuildMemberPointer(MD, ThisAdjustment);
1070 
1071   CharUnits FieldOffset =
1072     getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
1073   return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
1074 }
1075 
1076 /// The comparison algorithm is pretty easy: the member pointers are
1077 /// the same if they're either bitwise identical *or* both null.
1078 ///
1079 /// ARM is different here only because null-ness is more complicated.
1080 llvm::Value *
1081 ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
1082                                            llvm::Value *L,
1083                                            llvm::Value *R,
1084                                            const MemberPointerType *MPT,
1085                                            bool Inequality) {
1086   CGBuilderTy &Builder = CGF.Builder;
1087 
1088   llvm::ICmpInst::Predicate Eq;
1089   llvm::Instruction::BinaryOps And, Or;
1090   if (Inequality) {
1091     Eq = llvm::ICmpInst::ICMP_NE;
1092     And = llvm::Instruction::Or;
1093     Or = llvm::Instruction::And;
1094   } else {
1095     Eq = llvm::ICmpInst::ICMP_EQ;
1096     And = llvm::Instruction::And;
1097     Or = llvm::Instruction::Or;
1098   }
1099 
1100   // Member data pointers are easy because there's a unique null
1101   // value, so it just comes down to bitwise equality.
1102   if (MPT->isMemberDataPointer())
1103     return Builder.CreateICmp(Eq, L, R);
1104 
1105   // For member function pointers, the tautologies are more complex.
1106   // The Itanium tautology is:
1107   //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
1108   // The ARM tautology is:
1109   //   (L == R) <==> (L.ptr == R.ptr &&
1110   //                  (L.adj == R.adj ||
1111   //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
1112   // The inequality tautologies have exactly the same structure, except
1113   // applying De Morgan's laws.
1114 
1115   llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
1116   llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
1117 
1118   // This condition tests whether L.ptr == R.ptr.  This must always be
1119   // true for equality to hold.
1120   llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
1121 
1122   // This condition, together with the assumption that L.ptr == R.ptr,
1123   // tests whether the pointers are both null.  ARM imposes an extra
1124   // condition.
1125   llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
1126   llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
1127 
1128   // This condition tests whether L.adj == R.adj.  If this isn't
1129   // true, the pointers are unequal unless they're both null.
1130   llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
1131   llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
1132   llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
1133 
1134   // Null member function pointers on ARM clear the low bit of Adj,
1135   // so the zero condition has to check that neither low bit is set.
1136   if (UseARMMethodPtrABI) {
1137     llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
1138 
1139     // Compute (l.adj | r.adj) & 1 and test it against zero.
1140     llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
1141     llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
1142     llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
1143                                                       "cmp.or.adj");
1144     EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
1145   }
1146 
1147   // Tie together all our conditions.
1148   llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
1149   Result = Builder.CreateBinOp(And, PtrEq, Result,
1150                                Inequality ? "memptr.ne" : "memptr.eq");
1151   return Result;
1152 }
1153 
1154 llvm::Value *
1155 ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
1156                                           llvm::Value *MemPtr,
1157                                           const MemberPointerType *MPT) {
1158   CGBuilderTy &Builder = CGF.Builder;
1159 
1160   /// For member data pointers, this is just a check against -1.
1161   if (MPT->isMemberDataPointer()) {
1162     assert(MemPtr->getType() == CGM.PtrDiffTy);
1163     llvm::Value *NegativeOne =
1164       llvm::Constant::getAllOnesValue(MemPtr->getType());
1165     return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
1166   }
1167 
1168   // In Itanium, a member function pointer is not null if 'ptr' is not null.
1169   llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
1170 
1171   llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
1172   llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
1173 
1174   // On ARM, a member function pointer is also non-null if the low bit of 'adj'
1175   // (the virtual bit) is set.
1176   if (UseARMMethodPtrABI) {
1177     llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
1178     llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
1179     llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
1180     llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
1181                                                   "memptr.isvirtual");
1182     Result = Builder.CreateOr(Result, IsVirtual);
1183   }
1184 
1185   return Result;
1186 }
1187 
1188 bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
1189   const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
1190   if (!RD)
1191     return false;
1192 
1193   // If C++ prohibits us from making a copy, return by address.
1194   if (!RD->canPassInRegisters()) {
1195     auto Align = CGM.getContext().getTypeAlignInChars(FI.getReturnType());
1196     FI.getReturnInfo() = ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
1197     return true;
1198   }
1199   return false;
1200 }
1201 
/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  // Null member function pointers are {ptr = 0, adj = 0}, so all-zero
  // storage is already correct; null data member pointers are -1, so
  // they cannot be zero-initialized.
  return MPT->isMemberFunctionPointer();
}
1207 
1208 /// The Itanium ABI always places an offset to the complete object
1209 /// at entry -2 in the vtable.
1210 void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
1211                                             const CXXDeleteExpr *DE,
1212                                             Address Ptr,
1213                                             QualType ElementType,
1214                                             const CXXDestructorDecl *Dtor) {
1215   bool UseGlobalDelete = DE->isGlobalDelete();
1216   if (UseGlobalDelete) {
1217     // Derive the complete-object pointer, which is what we need
1218     // to pass to the deallocation function.
1219 
1220     // Grab the vtable pointer as an intptr_t*.
1221     auto *ClassDecl =
1222         cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
1223     llvm::Value *VTable =
1224         CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo(), ClassDecl);
1225 
1226     // Track back to entry -2 and pull out the offset there.
1227     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
1228         CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
1229     llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr,                                                        CGF.getPointerAlign());
1230 
1231     // Apply the offset.
1232     llvm::Value *CompletePtr =
1233       CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
1234     CompletePtr =
1235         CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
1236 
1237     // If we're supposed to call the global delete, make sure we do so
1238     // even if the destructor throws.
1239     CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
1240                                     ElementType);
1241   }
1242 
1243   // FIXME: Provide a source location here even though there's no
1244   // CXXMemberCallExpr for dtor call.
1245   CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
1246   EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, DE);
1247 
1248   if (UseGlobalDelete)
1249     CGF.PopCleanupBlock();
1250 }
1251 
1252 void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
1253   // void __cxa_rethrow();
1254 
1255   llvm::FunctionType *FTy =
1256     llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
1257 
1258   llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
1259 
1260   if (isNoReturn)
1261     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
1262   else
1263     CGF.EmitRuntimeCallOrInvoke(Fn);
1264 }
1265 
1266 static llvm::FunctionCallee getAllocateExceptionFn(CodeGenModule &CGM) {
1267   // void *__cxa_allocate_exception(size_t thrown_size);
1268 
1269   llvm::FunctionType *FTy =
1270     llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*isVarArg=*/false);
1271 
1272   return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
1273 }
1274 
1275 static llvm::FunctionCallee getThrowFn(CodeGenModule &CGM) {
1276   // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
1277   //                  void (*dest) (void *));
1278 
1279   llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
1280   llvm::FunctionType *FTy =
1281     llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
1282 
1283   return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
1284 }
1285 
1286 void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
1287   QualType ThrowType = E->getSubExpr()->getType();
1288   // Now allocate the exception object.
1289   llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
1290   uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
1291 
1292   llvm::FunctionCallee AllocExceptionFn = getAllocateExceptionFn(CGM);
1293   llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
1294       AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");
1295 
1296   CharUnits ExnAlign = CGF.getContext().getExnObjectAlignment();
1297   CGF.EmitAnyExprToExn(
1298       E->getSubExpr(), Address(ExceptionPtr, CGM.Int8Ty, ExnAlign));
1299 
1300   // Now throw the exception.
1301   llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
1302                                                          /*ForEH=*/true);
1303 
1304   // The address of the destructor.  If the exception type has a
1305   // trivial destructor (or isn't a record), we just pass null.
1306   llvm::Constant *Dtor = nullptr;
1307   if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
1308     CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
1309     if (!Record->hasTrivialDestructor()) {
1310       CXXDestructorDecl *DtorD = Record->getDestructor();
1311       Dtor = CGM.getAddrOfCXXStructor(GlobalDecl(DtorD, Dtor_Complete));
1312       Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
1313     }
1314   }
1315   if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);
1316 
1317   llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
1318   CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
1319 }
1320 
1321 static llvm::FunctionCallee getItaniumDynamicCastFn(CodeGenFunction &CGF) {
1322   // void *__dynamic_cast(const void *sub,
1323   //                      const abi::__class_type_info *src,
1324   //                      const abi::__class_type_info *dst,
1325   //                      std::ptrdiff_t src2dst_offset);
1326 
1327   llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
1328   llvm::Type *PtrDiffTy =
1329     CGF.ConvertType(CGF.getContext().getPointerDiffType());
1330 
1331   llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
1332 
1333   llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
1334 
1335   // Mark the function as nounwind readonly.
1336   llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
1337                                             llvm::Attribute::ReadOnly };
1338   llvm::AttributeList Attrs = llvm::AttributeList::get(
1339       CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1340 
1341   return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
1342 }
1343 
1344 static llvm::FunctionCallee getBadCastFn(CodeGenFunction &CGF) {
1345   // void __cxa_bad_cast();
1346   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1347   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
1348 }
1349 
1350 /// Compute the src2dst_offset hint as described in the
1351 /// Itanium C++ ABI [2.9.7]
1352 static CharUnits computeOffsetHint(ASTContext &Context,
1353                                    const CXXRecordDecl *Src,
1354                                    const CXXRecordDecl *Dst) {
1355   CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
1356                      /*DetectVirtual=*/false);
1357 
1358   // If Dst is not derived from Src we can skip the whole computation below and
1359   // return that Src is not a public base of Dst.  Record all inheritance paths.
1360   if (!Dst->isDerivedFrom(Src, Paths))
1361     return CharUnits::fromQuantity(-2ULL);
1362 
1363   unsigned NumPublicPaths = 0;
1364   CharUnits Offset;
1365 
1366   // Now walk all possible inheritance paths.
1367   for (const CXXBasePath &Path : Paths) {
1368     if (Path.Access != AS_public)  // Ignore non-public inheritance.
1369       continue;
1370 
1371     ++NumPublicPaths;
1372 
1373     for (const CXXBasePathElement &PathElement : Path) {
1374       // If the path contains a virtual base class we can't give any hint.
1375       // -1: no hint.
1376       if (PathElement.Base->isVirtual())
1377         return CharUnits::fromQuantity(-1ULL);
1378 
1379       if (NumPublicPaths > 1) // Won't use offsets, skip computation.
1380         continue;
1381 
1382       // Accumulate the base class offsets.
1383       const ASTRecordLayout &L = Context.getASTRecordLayout(PathElement.Class);
1384       Offset += L.getBaseClassOffset(
1385           PathElement.Base->getType()->getAsCXXRecordDecl());
1386     }
1387   }
1388 
1389   // -2: Src is not a public base of Dst.
1390   if (NumPublicPaths == 0)
1391     return CharUnits::fromQuantity(-2ULL);
1392 
1393   // -3: Src is a multiple public base type but never a virtual base type.
1394   if (NumPublicPaths > 1)
1395     return CharUnits::fromQuantity(-3ULL);
1396 
1397   // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
1398   // Return the offset of Src from the origin of Dst.
1399   return Offset;
1400 }
1401 
1402 static llvm::FunctionCallee getBadTypeidFn(CodeGenFunction &CGF) {
1403   // void __cxa_bad_typeid();
1404   llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
1405 
1406   return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
1407 }
1408 
bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  // Only typeid applied to a dereferenced pointer needs a null check
  // (a null pointer there reaches EmitBadTypeidCall / __cxa_bad_typeid);
  // a glvalue operand cannot be null.
  return IsDeref;
}
1413 
1414 void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
1415   llvm::FunctionCallee Fn = getBadTypeidFn(CGF);
1416   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1417   Call->setDoesNotReturn();
1418   CGF.Builder.CreateUnreachable();
1419 }
1420 
llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       Address ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  // typeid on a polymorphic glvalue: read the object's vtable pointer and
  // fetch the type_info stored just before the vtable's address point.
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo(), ClassDecl);

  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Load the type info.
    // In the relative layout the slot 4 bytes before the address point
    // holds a 32-bit relative reference, resolved via load.relative.
    Value = CGF.Builder.CreateBitCast(Value, CGM.Int8PtrTy);
    Value = CGF.Builder.CreateCall(
        CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
        {Value, llvm::ConstantInt::get(CGM.Int32Ty, -4)});

    // Setup to dereference again since this is a proxy we accessed.
    Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
  } else {
    // Load the type info.
    // In the classic layout, slot -1 from the address point is the
    // type_info pointer itself.
    Value =
        CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
  }
  // Both paths leave a pointer to the type_info pointer; load it.
  return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
                                       CGF.getPointerAlign());
}
1447 
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  // A dynamic_cast of a pointer must be null-checked before calling the
  // runtime; a reference operand cannot be null, so no check is needed.
  return SrcIsPtr;
}
1452 
llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  // Emit a call to the Itanium runtime entry point:
  //   void *__dynamic_cast(const void *sub, const __class_type_info *src,
  //                        const __class_type_info *dst,
  //                        std::ptrdiff_t src2dst_offset);
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  // The hint lets the runtime short-circuit common hierarchies; see
  // computeOffsetHint for the encoding of its special values.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    // __dynamic_cast signals failure with a null result; for a reference
    // target that must throw rather than produce a null reference.
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}
1495 
llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  Address ThisAddr,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  // dynamic_cast<void*> yields a pointer to the most-derived object. No
  // runtime call is needed: the vtable's offset-to-top entry holds the
  // displacement from this subobject to the complete object.
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
  auto *ClassDecl =
      cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
  llvm::Value *OffsetToTop;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, CGM.Int32Ty->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    // Relative layout: a 32-bit value two slots before the address point.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
  } else {
    llvm::Type *PtrDiffLTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Get the vtable pointer.
    llvm::Value *VTable =
        CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);

    // Get the offset-to-top from the vtable.
    // Classic layout: a ptrdiff_t two slots before the address point.
    OffsetToTop =
        CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
    OffsetToTop = CGF.Builder.CreateAlignedLoad(
        PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
  }
  // Finally, add the offset to the pointer.
  llvm::Value *Value = ThisAddr.getPointer();
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
  return CGF.Builder.CreateBitCast(Value, DestLTy);
}
1534 
1535 bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
1536   llvm::FunctionCallee Fn = getBadCastFn(CGF);
1537   llvm::CallBase *Call = CGF.EmitRuntimeCallOrInvoke(Fn);
1538   Call->setDoesNotReturn();
1539   CGF.Builder.CreateUnreachable();
1540   return true;
1541 }
1542 
llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         Address This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  // Load the dynamic offset of virtual base BaseClassDecl from the
  // vbase-offset slot in ClassDecl's vtable.
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy, ClassDecl);
  // Statically known position of the vbase-offset slot relative to the
  // vtable's address point.
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);
  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(
        CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
        "vbase.offset.ptr");

  llvm::Value *VBaseOffset;
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // Relative layout stores vbase offsets as 32-bit ints.
    VBaseOffsetPtr =
        CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
        "vbase.offset");
  } else {
    // Classic layout stores vbase offsets as ptrdiff_t.
    VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                               CGM.PtrDiffTy->getPointerTo());
    VBaseOffset = CGF.Builder.CreateAlignedLoad(
        CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
  }
  return VBaseOffset;
}
1572 
1573 void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
1574   // Just make sure we're in sync with TargetCXXABI.
1575   assert(CGM.getTarget().getCXXABI().hasConstructorVariants());
1576 
1577   // The constructor used for constructing this as a base class;
1578   // ignores virtual bases.
1579   CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));
1580 
1581   // The constructor used for constructing this as a complete class;
1582   // constructs the virtual bases, then calls the base constructor.
1583   if (!D->getParent()->isAbstract()) {
1584     // We don't need to emit the complete ctor if the class is abstract.
1585     CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
1586   }
1587 }
1588 
CGCXXABI::AddedStructorArgCounts
ItaniumCXXABI::buildStructorSignature(GlobalDecl GD,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type void **).
  // Only base-object constructors/destructors of classes with virtual bases
  // take one.
  if ((isa<CXXConstructorDecl>(GD.getDecl()) ? GD.getCtorType() == Ctor_Base
                                             : GD.getDtorType() == Dtor_Base) &&
      cast<CXXMethodDecl>(GD.getDecl())->getParent()->getNumVBases() != 0) {
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(Context.VoidPtrTy));
    return AddedStructorArgCounts::prefix(1);
  }
  return AddedStructorArgCounts{};
}
1607 
void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // calls the base destructor and then destructs any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}
1623 
void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  // Add the VTT parameter, when required, to the parameter list of the
  // constructor/destructor currently being emitted (CGF.CurGD). ResTy is
  // left untouched here.
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    // The VTT has type void **.
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    auto *VTTDecl = ImplicitParamDecl::Create(
        Context, /*DC=*/nullptr, MD->getLocation(), &Context.Idents.get("vtt"),
        T, ImplicitParamDecl::CXXVTT);
    // The VTT goes immediately after 'this'.
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}
1643 
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  // Emit the ABI-specific prologue for an instance method: bind 'this', the
  // VTT (for structors that take one), and the 'this'-return slot.

  // Naked functions have no prolog.
  if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
    return;

  /// Initialize the 'this' slot. In the Itanium C++ ABI, no prologue
  /// adjustments are required, because they are all handled by thunks.
  setCXXABIThisValue(CGF, loadIncomingCXXThis(CGF));

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}
1670 
1671 CGCXXABI::AddedStructorArgs ItaniumCXXABI::getImplicitConstructorArgs(
1672     CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
1673     bool ForVirtualBase, bool Delegating) {
1674   if (!NeedsVTTParameter(GlobalDecl(D, Type)))
1675     return AddedStructorArgs{};
1676 
1677   // Insert the implicit 'vtt' argument as the second argument.
1678   llvm::Value *VTT =
1679       CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
1680   QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
1681   return AddedStructorArgs::prefix({{VTT, VTTTy}});
1682 }
1683 
1684 llvm::Value *ItaniumCXXABI::getCXXDestructorImplicitParam(
1685     CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type,
1686     bool ForVirtualBase, bool Delegating) {
1687   GlobalDecl GD(DD, Type);
1688   return CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
1689 }
1690 
void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, Address This,
                                       QualType ThisTy) {
  GlobalDecl GD(DD, Type);
  // VTT is null when this particular call does not require one.
  llvm::Value *VTT =
      getCXXDestructorImplicitParam(CGF, DD, Type, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  CGCallee Callee;
  // Apple kexts route non-base virtual destructor calls through the vtable
  // even when the callee is statically known.
  if (getContext().getLangOpts().AppleKext &&
      Type != Dtor_Base && DD->isVirtual())
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
  else
    Callee = CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD), GD);

  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, VTT, VTTTy,
                            nullptr);
}
1711 
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  // Define RD's vtable group global; a no-op if it was already defined.
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  ConstantInitBuilder builder(CGM);
  auto components = builder.beginStruct();
  CGVT.createVTableInitializer(components, VTLayout, RTTI,
                               llvm::GlobalValue::isLocalLinkage(Linkage));
  components.finishAndSetAsInitializer(VTable);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGVProperties(VTable, RD);

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors(RD);

  // Always emit type metadata on non-available_externally definitions, and on
  // available_externally definitions if we are performing whole program
  // devirtualization. For WPD we need the type metadata on all vtable
  // definitions to ensure we associate derived classes with base classes
  // defined in headers but with a strong definition only in a shared library.
  if (!VTable->isDeclarationForLinker() ||
      CGM.getCodeGenOpts().WholeProgramVTables) {
    CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
    // For available_externally definitions, add the vtable to
    // @llvm.compiler.used so that it isn't deleted before whole program
    // analysis.
    if (VTable->isDeclarationForLinker()) {
      assert(CGM.getCodeGenOpts().WholeProgramVTables);
      CGM.addCompilerUsedGlobal(VTable);
    }
  }

  // The relative ABI hides non-DSO-local vtables behind a DSO-local alias so
  // in-DSO references don't need dynamic relocations.
  if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
    CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
}
1771 
1772 bool ItaniumCXXABI::isVirtualOffsetNeededForVTableField(
1773     CodeGenFunction &CGF, CodeGenFunction::VPtr Vptr) {
1774   if (Vptr.NearestVBase == nullptr)
1775     return false;
1776   return NeedsVTTParameter(CGF.CurGD);
1777 }
1778 
1779 llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
1780     CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
1781     const CXXRecordDecl *NearestVBase) {
1782 
1783   if ((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
1784       NeedsVTTParameter(CGF.CurGD)) {
1785     return getVTableAddressPointInStructorWithVTT(CGF, VTableClass, Base,
1786                                                   NearestVBase);
1787   }
1788   return getVTableAddressPoint(Base, VTableClass);
1789 }
1790 
llvm::Constant *
ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
                                     const CXXRecordDecl *VTableClass) {
  llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group, and the address point
  // within that vtable.
  VTableLayout::AddressPointLocation AddressPoint =
      CGM.getItaniumVTableContext()
          .getVTableLayout(VTableClass)
          .getAddressPoint(Base);
  // GEP indices: dereference the global, select the vtable within the group,
  // then the address point within that vtable.
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int32Ty, 0),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
    llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
  };

  // InRangeIndex=1 asserts that accesses stay within the selected member of
  // the vtable group, which lets the optimizer split the group.
  return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
                                              Indices, /*InBounds=*/true,
                                              /*InRangeIndex=*/1);
}
1812 
1813 // Check whether all the non-inline virtual methods for the class have the
1814 // specified attribute.
1815 template <typename T>
1816 static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
1817   bool FoundNonInlineVirtualMethodWithAttr = false;
1818   for (const auto *D : RD->noload_decls()) {
1819     if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
1820       if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
1821           FD->doesThisDeclarationHaveABody())
1822         continue;
1823       if (!D->hasAttr<T>())
1824         return false;
1825       FoundNonInlineVirtualMethodWithAttr = true;
1826     }
1827   }
1828 
1829   // We didn't find any non-inline virtual methods missing the attribute.  We
1830   // will return true when we found at least one non-inline virtual with the
1831   // attribute.  (This lets our caller know that the attribute needs to be
1832   // propagated up to the vtable.)
1833   return FoundNonInlineVirtualMethodWithAttr;
1834 }
1835 
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase) {
  // Fetch the vtable address point for Base out of the VTT parameter of the
  // structor currently being emitted.
  assert((Base.getBase()->getNumVBases() || NearestVBase != nullptr) &&
         NeedsVTTParameter(CGF.CurGD) && "This class doesn't have VTT");

  // Get the secondary vpointer index.
  uint64_t VirtualPointerIndex =
      CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

  /// Load the VTT.
  llvm::Value *VTT = CGF.LoadCXXVTT();
  if (VirtualPointerIndex)
    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.VoidPtrTy, VTT, VirtualPointerIndex);

  // And load the address point from the VTT.
  return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
                                       CGF.getPointerAlign());
}
1856 
1857 llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
1858     BaseSubobject Base, const CXXRecordDecl *VTableClass) {
1859   return getVTableAddressPoint(Base, VTableClass);
1860 }
1861 
llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  // Return (creating on first use, and caching in VTables) the global
  // variable holding RD's vtable group.
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this vtable for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  getMangleContext().mangleCXXVTable(RD, Out);

  const VTableLayout &VTLayout =
      CGM.getItaniumVTableContext().getVTableLayout(RD);
  llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  unsigned PAlign = CGM.getItaniumVTableContext().isRelativeLayout()
                        ? 32
                        : CGM.getTarget().getPointerAlign(0);

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, VTableType, llvm::GlobalValue::ExternalLinkage,
      getContext().toCharUnitsFromBits(PAlign).getQuantity());
  VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // In MS C++ if you have a class with virtual functions in which you are using
  // selective member import/export, then all virtual functions must be exported
  // unless they are inline, otherwise a link error will result. To match this
  // behavior, for such classes, we dllimport the vtable if it is defined
  // externally and all the non-inline virtual methods are marked dllimport, and
  // we dllexport the vtable if it is defined in this TU and all the non-inline
  // virtual methods are marked dllexport.
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
      if (CGM.getVTables().isVTableExternal(RD)) {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
      } else {
        if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
          VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
      }
    }
  }
  CGM.setGVProperties(VTable, RD);

  return VTable;
}
1915 
CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                  GlobalDecl GD,
                                                  Address This,
                                                  llvm::Type *Ty,
                                                  SourceLocation Loc) {
  // Load the callee for virtual method GD out of This's vtable and wrap it
  // in a CGCallee.
  llvm::Type *TyPtr = Ty->getPointerTo();
  auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
  llvm::Value *VTable = CGF.GetVTablePtr(
      This, TyPtr->getPointerTo(), MethodDecl->getParent());

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFunc;
  if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
    // CFI/WPD: load through llvm.type.checked.load using the slot's byte
    // offset from the address point.
    VFunc = CGF.EmitVTableTypeCheckedLoad(
        MethodDecl->getParent(), VTable, TyPtr,
        VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
  } else {
    CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);

    llvm::Value *VFuncLoad;
    if (CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Relative layout: slots hold 32-bit self-relative offsets, resolved
      // with llvm.load.relative at byte offset 4 * index.
      VTable = CGF.Builder.CreateBitCast(VTable, CGM.Int8PtrTy);
      llvm::Value *Load = CGF.Builder.CreateCall(
          CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
          {VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
      VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
    } else {
      // Classic layout: slots are plain function pointers.
      VTable =
          CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
      llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
          TyPtr, VTable, VTableIndex, "vfn");
      VFuncLoad =
          CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
                                        CGF.getPointerAlign());
    }

    // Add !invariant.load md to virtual function load to indicate that
    // function didn't change inside vtable.
    // It's safe to add it without -fstrict-vtable-pointers, but it would not
    // help in devirtualization because it will only matter if we will have 2
    // the same virtual function loads from the same vtable load, which won't
    // happen without enabled devirtualization with -fstrict-vtable-pointers.
    if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
        CGM.getCodeGenOpts().StrictVTablePointers) {
      if (auto *VFuncLoadInstr = dyn_cast<llvm::Instruction>(VFuncLoad)) {
        VFuncLoadInstr->setMetadata(
            llvm::LLVMContext::MD_invariant_load,
            llvm::MDNode::get(CGM.getLLVMContext(),
                              llvm::ArrayRef<llvm::Metadata *>()));
      }
    }
    VFunc = VFuncLoad;
  }

  CGCallee Callee(GD, VFunc);
  return Callee;
}
1973 
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    Address This, DeleteOrMemberCallExpr E) {
  // E is either the delete-expression or the explicit member call that
  // triggered this virtual destructor invocation; exactly one is set.
  auto *CE = E.dyn_cast<const CXXMemberCallExpr *>();
  auto *D = E.dyn_cast<const CXXDeleteExpr *>();
  assert((CE != nullptr) ^ (D != nullptr));
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  GlobalDecl GD(Dtor, DtorType);
  const CGFunctionInfo *FInfo =
      &CGM.getTypes().arrangeCXXStructorDeclaration(GD);
  llvm::FunctionType *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  CGCallee Callee = CGCallee::forVirtual(CE, GD, This, Ty);

  QualType ThisTy;
  if (CE) {
    ThisTy = CE->getObjectType();
  } else {
    ThisTy = D->getDestroyedType();
  }

  // Virtual destructor calls never pass a VTT.
  CGF.EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy, nullptr,
                            QualType(), nullptr);
  return nullptr;
}
2000 
2001 void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
2002   CodeGenVTables &VTables = CGM.getVTables();
2003   llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
2004   VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
2005 }
2006 
2007 bool ItaniumCXXABI::canSpeculativelyEmitVTableAsBaseClass(
2008     const CXXRecordDecl *RD) const {
2009   // We don't emit available_externally vtables if we are in -fapple-kext mode
2010   // because kext mode does not permit devirtualization.
2011   if (CGM.getLangOpts().AppleKext)
2012     return false;
2013 
2014   // If the vtable is hidden then it is not safe to emit an available_externally
2015   // copy of vtable.
2016   if (isVTableHidden(RD))
2017     return false;
2018 
2019   if (CGM.getCodeGenOpts().ForceEmitVTables)
2020     return true;
2021 
2022   // If we don't have any not emitted inline virtual function then we are safe
2023   // to emit an available_externally copy of vtable.
2024   // FIXME we can still emit a copy of the vtable if we
2025   // can emit definition of the inline functions.
2026   if (hasAnyUnusedVirtualInlineFunction(RD))
2027     return false;
2028 
2029   // For a class with virtual bases, we must also be able to speculatively
2030   // emit the VTT, because CodeGen doesn't have separate notions of "can emit
2031   // the vtable" and "can emit the VTT". For a base subobject, this means we
2032   // need to be able to emit non-virtual base vtables.
2033   if (RD->getNumVBases()) {
2034     for (const auto &B : RD->bases()) {
2035       auto *BRD = B.getType()->getAsCXXRecordDecl();
2036       assert(BRD && "no class for base specifier");
2037       if (B.isVirtual() || !BRD->isDynamicClass())
2038         continue;
2039       if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2040         return false;
2041     }
2042   }
2043 
2044   return true;
2045 }
2046 
2047 bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
2048   if (!canSpeculativelyEmitVTableAsBaseClass(RD))
2049     return false;
2050 
2051   // For a complete-object vtable (or more specifically, for the VTT), we need
2052   // to be able to speculatively emit the vtables of all dynamic virtual bases.
2053   for (const auto &B : RD->vbases()) {
2054     auto *BRD = B.getType()->getAsCXXRecordDecl();
2055     assert(BRD && "no class for base specifier");
2056     if (!BRD->isDynamicClass())
2057       continue;
2058     if (!canSpeculativelyEmitVTableAsBaseClass(BRD))
2059       return false;
2060   }
2061 
2062   return true;
2063 }
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          Address InitialPtr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  // Apply a thunk adjustment to InitialPtr: a constant non-virtual byte
  // offset plus, optionally, an offset loaded from the vtable at
  // VirtualAdjustment bytes from the address point. 'this' adjustments
  // apply the non-virtual part first; return adjustments apply it last.
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return InitialPtr.getPointer();

  Address V = CGF.Builder.CreateElementBitCast(InitialPtr, CGF.Int8Ty);

  // In a base-to-derived cast, the non-virtual adjustment is applied first.
  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    V = CGF.Builder.CreateConstInBoundsByteGEP(V,
                              CharUnits::fromQuantity(NonVirtualAdjustment));
  }

  // Perform the virtual adjustment if we have one.
  llvm::Value *ResultPtr;
  if (VirtualAdjustment) {
    Address VTablePtrPtr = CGF.Builder.CreateElementBitCast(V, CGF.Int8PtrTy);
    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *Offset;
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        CGF.Int8Ty, VTablePtr, VirtualAdjustment);
    if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
      // Load the adjustment offset from the vtable as a 32-bit int.
      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
      Offset =
          CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
                                        CharUnits::fromQuantity(4));
    } else {
      llvm::Type *PtrDiffTy =
          CGF.ConvertType(CGF.getContext().getPointerDiffType());

      OffsetPtr =
          CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

      // Load the adjustment offset from the vtable.
      Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
                                             CGF.getPointerAlign());
    }
    // Adjust our pointer.
    ResultPtr = CGF.Builder.CreateInBoundsGEP(
        V.getElementType(), V.getPointer(), Offset);
  } else {
    ResultPtr = V.getPointer();
  }

  // In a derived-to-base conversion, the non-virtual adjustment is
  // applied second.
  if (NonVirtualAdjustment && IsReturnAdjustment) {
    ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
                                                       NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(ResultPtr, InitialPtr.getType());
}
2124 
2125 llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
2126                                                   Address This,
2127                                                   const ThisAdjustment &TA) {
2128   return performTypeAdjustment(CGF, This, TA.NonVirtual,
2129                                TA.Virtual.Itanium.VCallOffsetOffset,
2130                                /*IsReturnAdjustment=*/false);
2131 }
2132 
2133 llvm::Value *
2134 ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
2135                                        const ReturnAdjustment &RA) {
2136   return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
2137                                RA.Virtual.Itanium.VBaseOffsetOffset,
2138                                /*IsReturnAdjustment=*/true);
2139 }
2140 
2141 void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
2142                                     RValue RV, QualType ResultType) {
2143   if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
2144     return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
2145 
2146   // Destructor thunks in the ARM ABI have indeterminate results.
2147   llvm::Type *T = CGF.ReturnValue.getElementType();
2148   RValue Undef = RValue::get(llvm::UndefValue::get(T));
2149   return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
2150 }
2151 
2152 /************************** Array allocation cookies **************************/
2153 
2154 CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2155   // The array cookie is a size_t; pad that up to the element alignment.
2156   // The cookie is actually right-justified in that space.
2157   return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
2158                   CGM.getContext().getPreferredTypeAlignInChars(elementType));
2159 }
2160 
/// Write the Itanium array cookie — the element count, right-justified in a
/// slot padded to the element type's preferred alignment — at the start of
/// the allocation, and return a pointer past the cookie to the element
/// storage.
Address ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                             Address NewPtr,
                                             llvm::Value *NumElements,
                                             const CXXNewExpr *expr,
                                             QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr.getAddressSpace();

  ASTContext &Ctx = getContext();
  CharUnits SizeSize = CGF.getSizeSize();

  // The size of the cookie.
  CharUnits CookieSize =
      std::max(SizeSize, Ctx.getPreferredTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.  The count is right-justified, so if
  // the cookie is larger than a size_t the count sits at the cookie's end.
  Address CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsByteGEP(CookiePtr, CookieOffset);

  // Write the number of elements into the appropriate slot.
  Address NumElementsPtr =
      CGF.Builder.CreateElementBitCast(CookiePtr, CGF.SizeTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  // Handle the array cookie specially in ASan.  Poisoning only applies in
  // the default address space, and only for replaceable global operator new
  // (or when the user opted in for custom allocation functions).
  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      (expr->getOperatorNew()->isReplaceableGlobalAllocationFunction() ||
       CGM.getCodeGenOpts().SanitizeAddressPoisonCustomArrayCookie)) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsPtr.getType(), false);
    llvm::FunctionCallee F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr.getPointer());
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  return CGF.Builder.CreateConstInBoundsByteGEP(NewPtr, CookieSize);
}
2206 
2207 llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2208                                                 Address allocPtr,
2209                                                 CharUnits cookieSize) {
2210   // The element size is right-justified in the cookie.
2211   Address numElementsPtr = allocPtr;
2212   CharUnits numElementsOffset = cookieSize - CGF.getSizeSize();
2213   if (!numElementsOffset.isZero())
2214     numElementsPtr =
2215       CGF.Builder.CreateConstInBoundsByteGEP(numElementsPtr, numElementsOffset);
2216 
2217   unsigned AS = allocPtr.getAddressSpace();
2218   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2219   if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
2220     return CGF.Builder.CreateLoad(numElementsPtr);
2221   // In asan mode emit a function call instead of a regular load and let the
2222   // run-time deal with it: if the shadow is properly poisoned return the
2223   // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
2224   // We can't simply ignore this load using nosanitize metadata because
2225   // the metadata may be lost.
2226   llvm::FunctionType *FTy =
2227       llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
2228   llvm::FunctionCallee F =
2229       CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
2230   return CGF.Builder.CreateCall(F, numElementsPtr.getPointer());
2231 }
2232 
2233 CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
2234   // ARM says that the cookie is always:
2235   //   struct array_cookie {
2236   //     std::size_t element_size; // element_size != 0
2237   //     std::size_t element_count;
2238   //   };
2239   // But the base ABI doesn't give anything an alignment greater than
2240   // 8, so we can dismiss this as typical ABI-author blindness to
2241   // actual language complexity and round up to the element alignment.
2242   return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
2243                   CGM.getContext().getTypeAlignInChars(elementType));
2244 }
2245 
/// Write the ARM array cookie — element size followed by element count, both
/// size_t, at the start of the allocation — and return a pointer past the
/// cookie to the element storage.
Address ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                         Address newPtr,
                                         llvm::Value *numElements,
                                         const CXXNewExpr *expr,
                                         QualType elementType) {
  assert(requiresArrayCookie(expr));

  // The cookie is always at the start of the buffer.
  Address cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateElementBitCast(cookie, CGF.SizeTy);
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP(cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.  The cookie size already accounts for any
  // extra padding required by over-aligned element types.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsByteGEP(newPtr, cookieSize);
}
2271 
2272 llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
2273                                             Address allocPtr,
2274                                             CharUnits cookieSize) {
2275   // The number of elements is at offset sizeof(size_t) relative to
2276   // the allocated pointer.
2277   Address numElementsPtr
2278     = CGF.Builder.CreateConstInBoundsByteGEP(allocPtr, CGF.getSizeSize());
2279 
2280   numElementsPtr = CGF.Builder.CreateElementBitCast(numElementsPtr, CGF.SizeTy);
2281   return CGF.Builder.CreateLoad(numElementsPtr);
2282 }
2283 
2284 /*********************** Static local initialization **************************/
2285 
2286 static llvm::FunctionCallee getGuardAcquireFn(CodeGenModule &CGM,
2287                                               llvm::PointerType *GuardPtrTy) {
2288   // int __cxa_guard_acquire(__guard *guard_object);
2289   llvm::FunctionType *FTy =
2290     llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
2291                             GuardPtrTy, /*isVarArg=*/false);
2292   return CGM.CreateRuntimeFunction(
2293       FTy, "__cxa_guard_acquire",
2294       llvm::AttributeList::get(CGM.getLLVMContext(),
2295                                llvm::AttributeList::FunctionIndex,
2296                                llvm::Attribute::NoUnwind));
2297 }
2298 
2299 static llvm::FunctionCallee getGuardReleaseFn(CodeGenModule &CGM,
2300                                               llvm::PointerType *GuardPtrTy) {
2301   // void __cxa_guard_release(__guard *guard_object);
2302   llvm::FunctionType *FTy =
2303     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2304   return CGM.CreateRuntimeFunction(
2305       FTy, "__cxa_guard_release",
2306       llvm::AttributeList::get(CGM.getLLVMContext(),
2307                                llvm::AttributeList::FunctionIndex,
2308                                llvm::Attribute::NoUnwind));
2309 }
2310 
2311 static llvm::FunctionCallee getGuardAbortFn(CodeGenModule &CGM,
2312                                             llvm::PointerType *GuardPtrTy) {
2313   // void __cxa_guard_abort(__guard *guard_object);
2314   llvm::FunctionType *FTy =
2315     llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
2316   return CGM.CreateRuntimeFunction(
2317       FTy, "__cxa_guard_abort",
2318       llvm::AttributeList::get(CGM.getLLVMContext(),
2319                                llvm::AttributeList::FunctionIndex,
2320                                llvm::Attribute::NoUnwind));
2321 }
2322 
2323 namespace {
2324   struct CallGuardAbort final : EHScopeStack::Cleanup {
2325     llvm::GlobalVariable *Guard;
2326     CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
2327 
2328     void Emit(CodeGenFunction &CGF, Flags flags) override {
2329       CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
2330                                   Guard);
2331     }
2332   };
2333 }
2334 
2335 /// The ARM code here follows the Itanium code closely enough that we
2336 /// just special-case it at particular places.
/// Emit a thread-safe (when required) guarded initialization for the static
/// or inline variable D backed by 'var': test the guard, and if the variable
/// is uninitialized, run the initializer under __cxa_guard_acquire/release
/// (or a plain byte store when thread-safety isn't needed).
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // Inline variables that weren't instantiated from variable templates have
  // partially-ordered initialization within their translation unit.
  bool NonTemplateInline =
      D.isInline() &&
      !isTemplateInstantiation(D.getTemplateSpecializationKind());

  // We only need to use thread-safe statics for local non-TLS variables and
  // inline variables; other global initialization is always single-threaded
  // or (through lazy dynamic loading in multiple threads) unsequenced.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    (D.isLocalVarDecl() || NonTemplateInline) &&
                    !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  CharUnits guardAlignment;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
    guardAlignment = CharUnits::One();
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    if (UseARMGuardVarABI) {
      guardTy = CGF.SizeTy;
      guardAlignment = CGF.getSizeAlign();
    } else {
      guardTy = CGF.Int64Ty;
      guardAlignment = CharUnits::fromQuantity(
                             CGM.getDataLayout().getABITypeAlignment(guardTy));
    }
  }
  // The guard pointer lives in the target's default globals address space.
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo(
      CGF.CGM.getDataLayout().getDefaultGlobalsAddressSpace());

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setDSOLocal(var->isDSOLocal());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());
    guard->setAlignment(guardAlignment.getAsAlign());

    // The ABI says: "It is suggested that it be emitted in the same COMDAT
    // group as the associated data object." In practice, this doesn't work for
    // non-ELF and non-Wasm object formats, so only do it for ELF and Wasm.
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C &&
        (CGM.getTarget().getTriple().isOSBinFormatELF() ||
         CGM.getTarget().getTriple().isOSBinFormatWasm())) {
      guard->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    // Cache the guard so a re-emission of this function body reuses it.
    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  Address guardAddr = Address(guard, guard->getValueType(), guardAlignment);

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::AtomicOrdering::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *NeedsInit = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  CGF.EmitCXXGuardedInitBranch(NeedsInit, InitCheckBlock, EndBlock,
                               CodeGenFunction::GuardKind::VariableGuard, &D);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.  A nonzero return means we won the race
    // and must perform the initialization.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release.  This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
                                guardAddr.getPointer());
  } else {
    // Store 1 into the first byte of the guard variable after initialization is
    // complete.
    Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
                        Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
  }

  CGF.EmitBlock(EndBlock);
}
2521 
2522 /// Register a global destructor using __cxa_atexit.
/// Register a global destructor using __cxa_atexit — or, when TLS is true,
/// __cxa_thread_atexit (_tlv_atexit on Darwin) — passing the destructor
/// function, the object address, and this image's __dso_handle.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::FunctionCallee dtor,
                                        llvm::Constant *addr, bool TLS) {
  assert(!CGF.getTarget().getTriple().isOSAIX() &&
         "unexpected call to emitGlobalDtorWithCXAAtExit");
  assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
         "__cxa_atexit is disabled");
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isOSDarwin() ?  "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC.  Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // Preserve address space of addr.  If addr is in a non-default address
  // space, the i8* parameter type must be in the same space.
  auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
  auto AddrInt8PtrTy =
      AddrAS ? CGF.Int8Ty->getPointerTo(AddrAS) : CGF.Int8PtrTy;

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
      CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
  auto *GV = cast<llvm::GlobalValue>(handle->stripPointerCasts());
  GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = {dtorTy, AddrInt8PtrTy, handle->getType()};
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::FunctionCallee atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit.getCallee()))
    fn->setDoesNotThrow();

  if (!addr)
    // addr is null when we are trying to register a dtor annotated with
    // __attribute__((destructor)) in a constructor function. Using null here is
    // okay because this argument is just passed back to the destructor
    // function.
    addr = llvm::Constant::getNullValue(CGF.Int8PtrTy);

  llvm::Value *args[] = {llvm::ConstantExpr::getBitCast(
                             cast<llvm::Constant>(dtor.getCallee()), dtorTy),
                         llvm::ConstantExpr::getBitCast(addr, AddrInt8PtrTy),
                         handle};
  CGF.EmitNounwindRuntimeCall(atexit, args);
}
2576 
2577 static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
2578                                                    StringRef FnName) {
2579   // Create a function that registers/unregisters destructors that have the same
2580   // priority.
2581   llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
2582   llvm::Function *GlobalInitOrCleanupFn = CGM.CreateGlobalInitOrCleanUpFunction(
2583       FTy, FnName, CGM.getTypes().arrangeNullaryFunction(), SourceLocation());
2584 
2585   return GlobalInitOrCleanupFn;
2586 }
2587 
/// For each destructor priority, build a __GLOBAL_cleanup_<prio> function
/// that calls unatexit() on every destructor registered at that priority
/// and, when unatexit reports the handler was still registered (returns 0),
/// invokes the destructor directly; the cleanup function is then added as a
/// global destructor at the same priority.
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalCleanupFnName =
        std::string("__GLOBAL_cleanup_") + llvm::to_string(Priority);

    llvm::Function *GlobalCleanupFn =
        createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Get the destructor function type, void(*)(void).
    llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
    llvm::Type *dtorTy = dtorFuncTy->getPointerTo();

    // Destructor functions are run/unregistered in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    auto itv = Dtors.rbegin();
    while (itv != Dtors.rend()) {
      llvm::Function *Dtor = *itv;

      // We're assuming that the destructor function is something we can
      // reasonably call with the correct CC.  Go ahead and cast it to the
      // right prototype.
      llvm::Constant *dtor = llvm::ConstantExpr::getBitCast(Dtor, dtorTy);
      llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtor);
      llvm::Value *NeedsDestruct =
          CGF.Builder.CreateIsNull(V, "needs_destruct");

      llvm::BasicBlock *DestructCallBlock =
          CGF.createBasicBlock("destruct.call");
      // The last destructor's merge block is named "destruct.end"; earlier
      // ones flow into the next "unatexit.call" block.
      llvm::BasicBlock *EndBlock = CGF.createBasicBlock(
          (itv + 1) != Dtors.rend() ? "unatexit.call" : "destruct.end");
      // Check if unatexit returns a value of 0. If it does, jump to
      // DestructCallBlock, otherwise jump to EndBlock directly.
      CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

      CGF.EmitBlock(DestructCallBlock);

      // Emit the call to casted Dtor.
      llvm::CallInst *CI = CGF.Builder.CreateCall(dtorFuncTy, dtor);
      // Make sure the call and the callee agree on calling convention.
      CI->setCallingConv(Dtor->getCallingConv());

      CGF.EmitBlock(EndBlock);

      itv++;
    }

    CGF.FinishFunction();
    AddGlobalDtor(GlobalCleanupFn, Priority);
  }
}
2646 
/// For each destructor priority, build a __GLOBAL_init_<prio> function that
/// registers that priority's destructors via __cxa_atexit (or plain atexit
/// when __cxa_atexit is disabled), and add it as a global constructor.  On
/// targets using sinit/sterm, also emit the matching unregistration
/// functions.
void CodeGenModule::registerGlobalDtorsWithAtExit() {
  for (const auto &I : DtorsUsingAtExit) {
    int Priority = I.first;
    std::string GlobalInitFnName =
        std::string("__GLOBAL_init_") + llvm::to_string(Priority);
    llvm::Function *GlobalInitFn =
        createGlobalInitOrCleanupFn(*this, GlobalInitFnName);

    CodeGenFunction CGF(*this);
    CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
                      getTypes().arrangeNullaryFunction(), FunctionArgList(),
                      SourceLocation(), SourceLocation());
    auto AL = ApplyDebugLocation::CreateArtificial(CGF);

    // Since constructor functions are run in non-descending order of their
    // priorities, destructors are registered in non-descending order of their
    // priorities, and since destructor functions are run in the reverse order
    // of their registration, destructor functions are run in non-ascending
    // order of their priorities.
    const llvm::TinyPtrVector<llvm::Function *> &Dtors = I.second;
    for (auto *Dtor : Dtors) {
      // Register the destructor function calling __cxa_atexit if it is
      // available. Otherwise fall back on calling atexit.
      if (getCodeGenOpts().CXAAtExit) {
        emitGlobalDtorWithCXAAtExit(CGF, Dtor, nullptr, false);
      } else {
        // Get the destructor function type, void(*)(void).
        llvm::Type *dtorTy =
            llvm::FunctionType::get(CGF.VoidTy, false)->getPointerTo();

        // We're assuming that the destructor function is something we can
        // reasonably call with the correct CC.  Go ahead and cast it to the
        // right prototype.
        CGF.registerGlobalDtorWithAtExit(
            llvm::ConstantExpr::getBitCast(Dtor, dtorTy));
      }
    }

    CGF.FinishFunction();
    AddGlobalCtor(GlobalInitFn, Priority, nullptr);
  }

  if (getCXXABI().useSinitAndSterm())
    unregisterGlobalDtorsWithUnAtExit();
}
2692 
2693 /// Register a global destructor as best as we know how.
2694 void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
2695                                        llvm::FunctionCallee dtor,
2696                                        llvm::Constant *addr) {
2697   if (D.isNoDestroy(CGM.getContext()))
2698     return;
2699 
2700   // emitGlobalDtorWithCXAAtExit will emit a call to either __cxa_thread_atexit
2701   // or __cxa_atexit depending on whether this VarDecl is a thread-local storage
2702   // or not. CXAAtExit controls only __cxa_atexit, so use it if it is enabled.
2703   // We can always use __cxa_thread_atexit.
2704   if (CGM.getCodeGenOpts().CXAAtExit || D.getTLSKind())
2705     return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());
2706 
2707   // In Apple kexts, we want to add a global destructor entry.
2708   // FIXME: shouldn't this be guarded by some variable?
2709   if (CGM.getLangOpts().AppleKext) {
2710     // Generate a global destructor entry.
2711     return CGM.AddCXXDtorEntry(dtor, addr);
2712   }
2713 
2714   CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
2715 }
2716 
2717 static bool isThreadWrapperReplaceable(const VarDecl *VD,
2718                                        CodeGen::CodeGenModule &CGM) {
2719   assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
2720   // Darwin prefers to have references to thread local variables to go through
2721   // the thread wrapper instead of directly referencing the backing variable.
2722   return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2723          CGM.getTarget().getTriple().isOSDarwin();
2724 }
2725 
2726 /// Get the appropriate linkage for the wrapper function. This is essentially
2727 /// the weak form of the variable's linkage; every translation unit which needs
2728 /// the wrapper emits a copy, and we want the linker to merge them.
2729 static llvm::GlobalValue::LinkageTypes
2730 getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
2731   llvm::GlobalValue::LinkageTypes VarLinkage =
2732       CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
2733 
2734   // For internal linkage variables, we don't need an external or weak wrapper.
2735   if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
2736     return VarLinkage;
2737 
2738   // If the thread wrapper is replaceable, give it appropriate linkage.
2739   if (isThreadWrapperReplaceable(VD, CGM))
2740     if (!llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) &&
2741         !llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
2742       return VarLinkage;
2743   return llvm::GlobalValue::WeakODRLinkage;
2744 }
2745 
/// Return the thread_local wrapper function for VD, creating it (with the
/// appropriate linkage, comdat, visibility, and calling convention) and
/// recording it in ThreadWrappers on first use.
llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // NOTE(review): Val is not used in this function's visible body.
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
  }

  // FIXME: If VD is a definition, we should regenerate the function attributes
  // before returning.
  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  // The wrapper returns a pointer to the variable, so strip any reference
  // from the variable's type before forming the pointer type.
  QualType RetQT = VD->getType();
  if (RetQT->isReferenceType())
    RetQT = RetQT.getNonReferenceType();

  const CGFunctionInfo &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(
      getContext().getPointerType(RetQT), FunctionArgList());

  llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());

  if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
    Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);

  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage())
    if (!isThreadWrapperReplaceable(VD, CGM) ||
        llvm::GlobalVariable::isLinkOnceLinkage(Wrapper->getLinkage()) ||
        llvm::GlobalVariable::isWeakODRLinkage(Wrapper->getLinkage()) ||
        VD->getVisibility() == HiddenVisibility)
      Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);

  // Replaceable wrappers (Darwin dynamic TLS) use the CXX_FAST_TLS calling
  // convention and never unwind.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    Wrapper->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
    Wrapper->addFnAttr(llvm::Attribute::NoUnwind);
  }

  ThreadWrappers.push_back({VD, Wrapper});
  return Wrapper;
}
2794 
/// Emit definitions for the thread wrapper functions (and any associated
/// "_ZTH" init functions or aliases) for the thread_local variables referenced
/// or defined in this translation unit.
void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;

  // Separate initializers into those with ordered (or partially-ordered)
  // initialization and those with unordered initialization.
  llvm::SmallVector<llvm::Function *, 8> OrderedInits;
  llvm::SmallDenseMap<const VarDecl *, llvm::Function *> UnorderedInits;
  for (unsigned I = 0; I != CXXThreadLocalInits.size(); ++I) {
    // Template instantiations have unordered initialization; everything else
    // is initialized in declaration order.
    if (isTemplateInstantiation(
            CXXThreadLocalInitVars[I]->getTemplateSpecializationKind()))
      UnorderedInits[CXXThreadLocalInitVars[I]->getCanonicalDecl()] =
          CXXThreadLocalInits[I];
    else
      OrderedInits.push_back(CXXThreadLocalInits[I]);
  }

  if (!OrderedInits.empty()) {
    // Generate a guarded initialization function: all ordered initializers are
    // run from a single __tls_init, gated by the __tls_guard byte so they run
    // at most once per thread.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
    InitFunc = CGM.CreateGlobalInitOrCleanUpFunction(FTy, "__tls_init", FI,
                                                     SourceLocation(),
                                                     /*TLS=*/true);
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    Guard->setThreadLocalMode(CGM.GetDefaultLLVMTLSModel());

    CharUnits GuardAlign = CharUnits::One();
    Guard->setAlignment(GuardAlign.getAsAlign());

    CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(
        InitFunc, OrderedInits, ConstantAddress(Guard, CGM.Int8Ty, GuardAlign));
    // On Darwin platforms, use CXX_FAST_TLS calling convention.
    if (CGM.getTarget().getTriple().isOSDarwin()) {
      InitFunc->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
      InitFunc->addFnAttr(llvm::Attribute::NoUnwind);
    }
  }

  // Create declarations for thread wrappers for all thread-local variables
  // with non-discardable definitions in this translation unit.
  for (const VarDecl *VD : CXXThreadLocals) {
    if (VD->hasDefinition() &&
        !isDiscardableGVALinkage(getContext().GetGVALinkageForVariable(VD))) {
      llvm::GlobalValue *GV = CGM.GetGlobalValue(CGM.getMangledName(VD));
      getOrCreateThreadLocalWrapper(VD, GV);
    }
  }

  // Emit all referenced thread wrappers.
  for (auto VDAndWrapper : ThreadWrappers) {
    const VarDecl *VD = VDAndWrapper.first;
    llvm::GlobalVariable *Var =
        cast<llvm::GlobalVariable>(CGM.GetGlobalValue(CGM.getMangledName(VD)));
    llvm::Function *Wrapper = VDAndWrapper.second;

    // Some targets require that all access to thread local variables go through
    // the thread wrapper.  This means that we cannot attempt to create a thread
    // wrapper or a thread helper.
    if (!VD->hasDefinition()) {
      if (isThreadWrapperReplaceable(VD, CGM)) {
        Wrapper->setLinkage(llvm::Function::ExternalLinkage);
        continue;
      }

      // If this isn't a TU in which this variable is defined, the thread
      // wrapper is discardable.
      if (Wrapper->getLinkage() == llvm::Function::WeakODRLinkage)
        Wrapper->setLinkage(llvm::Function::LinkOnceODRLinkage);
    }

    CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Wrapper);

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
    }

    llvm::FunctionType *InitFnTy = llvm::FunctionType::get(CGM.VoidTy, false);

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    bool HasConstantInitialization = false;
    if (!usesThreadWrapperFunction(VD)) {
      // Constant-initialized and no destruction needed: the wrapper has no
      // dynamic init to perform at all.
      HasConstantInitialization = true;
    } else if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      llvm::Function *InitFuncToUse = InitFunc;
      // Unordered (template-instantiation) variables get their own per-variable
      // init function rather than the shared __tls_init.
      if (isTemplateInstantiation(VD->getTemplateSpecializationKind()))
        InitFuncToUse = UnorderedInits.lookup(VD->getCanonicalDecl());
      if (InitFuncToUse)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFuncToUse);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      Init = llvm::Function::Create(InitFnTy,
                                    llvm::GlobalVariable::ExternalWeakLinkage,
                                    InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(
          GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
    }

    if (Init) {
      Init->setVisibility(Var->getVisibility());
      // Don't mark an extern_weak function DSO local on windows.
      if (!CGM.getTriple().isOSWindows() || !Init->hasExternalWeakLinkage())
        Init->setDSOLocal(Var->isDSOLocal());
    }

    llvm::LLVMContext &Context = CGM.getModule().getContext();

    // The linker on AIX is not happy with missing weak symbols.  However,
    // other TUs will not know whether the initialization routine exists
    // so create an empty, init function to satisfy the linker.
    // This is needed whenever a thread wrapper function is not used, and
    // also when the symbol is weak.
    if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
        isEmittedWithConstantInitializer(VD, true) &&
        !mayNeedDestruction(VD)) {
      // Init should be null.  If it were non-null, then the logic above would
      // either be defining the function to be an alias or declaring the
      // function with the expectation that the definition of the variable
      // is elsewhere.
      assert(Init == nullptr && "Expected Init to be null.");

      llvm::Function *Func = llvm::Function::Create(
          InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
      const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
      CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
                                    cast<llvm::Function>(Func),
                                    /*IsThunk=*/false);
      // Create a function body that just returns
      llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
      CGBuilderTy Builder(CGM, Entry);
      Builder.CreateRetVoid();
    }

    // Now emit the wrapper's body: optionally invoke the init function, then
    // return the (possibly dereferenced) address of the variable.
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(CGM, Entry);
    if (HasConstantInitialization) {
      // No dynamic initialization to invoke.
    } else if (InitIsInitFunc) {
      if (Init) {
        llvm::CallInst *CallVal = Builder.CreateCall(InitFnTy, Init);
        if (isThreadWrapperReplaceable(VD, CGM)) {
          CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
          llvm::Function *Fn =
              cast<llvm::Function>(cast<llvm::GlobalAlias>(Init)->getAliasee());
          Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
        }
      }
    } else if (CGM.getTriple().isOSAIX()) {
      // On AIX, except if constinit and also neither of class type or of
      // (possibly multi-dimensional) array of class type, thread_local vars
      // will have init routines regardless of whether they are
      // const-initialized.  Since the routine is guaranteed to exist, we can
      // unconditionally call it without testing for its existence.  This
      // avoids potentially unresolved weak symbols which the AIX linker
      // isn't happy with.
      Builder.CreateCall(InitFnTy, Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(InitFnTy, Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      CharUnits Align = CGM.getContext().getDeclAlign(VD);
      Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}
2998 
2999 LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
3000                                                    const VarDecl *VD,
3001                                                    QualType LValType) {
3002   llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD);
3003   llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
3004 
3005   llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
3006   CallVal->setCallingConv(Wrapper->getCallingConv());
3007 
3008   LValue LV;
3009   if (VD->getType()->isReferenceType())
3010     LV = CGF.MakeNaturalAlignAddrLValue(CallVal, LValType);
3011   else
3012     LV = CGF.MakeAddrLValue(CallVal, LValType,
3013                             CGF.getContext().getDeclAlign(VD));
3014   // FIXME: need setObjCGCLValueClass?
3015   return LV;
3016 }
3017 
3018 /// Return whether the given global decl needs a VTT parameter, which it does
3019 /// if it's a base constructor or destructor with virtual bases.
3020 bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
3021   const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
3022 
3023   // We don't have any virtual bases, just return early.
3024   if (!MD->getParent()->getNumVBases())
3025     return false;
3026 
3027   // Check if we have a base constructor.
3028   if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
3029     return true;
3030 
3031   // Check if we have a base destructor.
3032   if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
3033     return true;
3034 
3035   return false;
3036 }
3037 
namespace {
/// ItaniumRTTIBuilder - Helper that builds Itanium-ABI RTTI descriptors
/// (std::type_info objects) by accumulating their constant fields.
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10,

    /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
    //PTI_TransactionSafe = 0x20,

    /// PTI_Noexcept - Pointee is noexcept function (C++1z).
    PTI_Noexcept = 0x40,
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type, or
  /// link to an existing RTTI descriptor if one already exists.
  llvm::Constant *BuildTypeInfo(QualType Ty);

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  llvm::Constant *BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass);
};
}
3138 
3139 llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
3140     QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
3141   SmallString<256> Name;
3142   llvm::raw_svector_ostream Out(Name);
3143   CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
3144 
3145   // We know that the mangled name of the type starts at index 4 of the
3146   // mangled name of the typename, so we can just index into it in order to
3147   // get the mangled name of the type.
3148   llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
3149                                                             Name.substr(4));
3150   auto Align = CGM.getContext().getTypeAlignInChars(CGM.getContext().CharTy);
3151 
3152   llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
3153       Name, Init->getType(), Linkage, Align.getQuantity());
3154 
3155   GV->setInitializer(Init);
3156 
3157   return GV;
3158 }
3159 
3160 llvm::Constant *
3161 ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
3162   // Mangle the RTTI name.
3163   SmallString<256> Name;
3164   llvm::raw_svector_ostream Out(Name);
3165   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3166 
3167   // Look for an existing global.
3168   llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
3169 
3170   if (!GV) {
3171     // Create a new global variable.
3172     // Note for the future: If we would ever like to do deferred emission of
3173     // RTTI, check if emitting vtables opportunistically need any adjustment.
3174 
3175     GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
3176                                   /*isConstant=*/true,
3177                                   llvm::GlobalValue::ExternalLinkage, nullptr,
3178                                   Name);
3179     const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
3180     CGM.setGVProperties(GV, RD);
3181     // Import the typeinfo symbol when all non-inline virtual methods are
3182     // imported.
3183     if (CGM.getTarget().hasPS4DLLImportExport()) {
3184       if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
3185         GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
3186         CGM.setDSOLocal(GV);
3187       }
3188     }
3189   }
3190 
3191   return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
3192 }
3193 
3194 /// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
3195 /// info for that type is defined in the standard library.
3196 static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
3197   // Itanium C++ ABI 2.9.2:
3198   //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
3199   //   the run-time support library. Specifically, the run-time support
3200   //   library should contain type_info objects for the types X, X* and
3201   //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
3202   //   unsigned char, signed char, short, unsigned short, int, unsigned int,
3203   //   long, unsigned long, long long, unsigned long long, float, double,
3204   //   long double, char16_t, char32_t, and the IEEE 754r decimal and
3205   //   half-precision floating point types.
3206   //
3207   // GCC also emits RTTI for __int128.
3208   // FIXME: We do not emit RTTI information for decimal types here.
3209 
3210   // Types added here must also be added to EmitFundamentalRTTIDescriptors.
3211   switch (Ty->getKind()) {
3212     case BuiltinType::Void:
3213     case BuiltinType::NullPtr:
3214     case BuiltinType::Bool:
3215     case BuiltinType::WChar_S:
3216     case BuiltinType::WChar_U:
3217     case BuiltinType::Char_U:
3218     case BuiltinType::Char_S:
3219     case BuiltinType::UChar:
3220     case BuiltinType::SChar:
3221     case BuiltinType::Short:
3222     case BuiltinType::UShort:
3223     case BuiltinType::Int:
3224     case BuiltinType::UInt:
3225     case BuiltinType::Long:
3226     case BuiltinType::ULong:
3227     case BuiltinType::LongLong:
3228     case BuiltinType::ULongLong:
3229     case BuiltinType::Half:
3230     case BuiltinType::Float:
3231     case BuiltinType::Double:
3232     case BuiltinType::LongDouble:
3233     case BuiltinType::Float16:
3234     case BuiltinType::Float128:
3235     case BuiltinType::Ibm128:
3236     case BuiltinType::Char8:
3237     case BuiltinType::Char16:
3238     case BuiltinType::Char32:
3239     case BuiltinType::Int128:
3240     case BuiltinType::UInt128:
3241       return true;
3242 
3243 #define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
3244     case BuiltinType::Id:
3245 #include "clang/Basic/OpenCLImageTypes.def"
3246 #define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
3247     case BuiltinType::Id:
3248 #include "clang/Basic/OpenCLExtensionTypes.def"
3249     case BuiltinType::OCLSampler:
3250     case BuiltinType::OCLEvent:
3251     case BuiltinType::OCLClkEvent:
3252     case BuiltinType::OCLQueue:
3253     case BuiltinType::OCLReserveID:
3254 #define SVE_TYPE(Name, Id, SingletonId) \
3255     case BuiltinType::Id:
3256 #include "clang/Basic/AArch64SVEACLETypes.def"
3257 #define PPC_VECTOR_TYPE(Name, Id, Size) \
3258     case BuiltinType::Id:
3259 #include "clang/Basic/PPCTypes.def"
3260 #define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
3261 #include "clang/Basic/RISCVVTypes.def"
3262     case BuiltinType::ShortAccum:
3263     case BuiltinType::Accum:
3264     case BuiltinType::LongAccum:
3265     case BuiltinType::UShortAccum:
3266     case BuiltinType::UAccum:
3267     case BuiltinType::ULongAccum:
3268     case BuiltinType::ShortFract:
3269     case BuiltinType::Fract:
3270     case BuiltinType::LongFract:
3271     case BuiltinType::UShortFract:
3272     case BuiltinType::UFract:
3273     case BuiltinType::ULongFract:
3274     case BuiltinType::SatShortAccum:
3275     case BuiltinType::SatAccum:
3276     case BuiltinType::SatLongAccum:
3277     case BuiltinType::SatUShortAccum:
3278     case BuiltinType::SatUAccum:
3279     case BuiltinType::SatULongAccum:
3280     case BuiltinType::SatShortFract:
3281     case BuiltinType::SatFract:
3282     case BuiltinType::SatLongFract:
3283     case BuiltinType::SatUShortFract:
3284     case BuiltinType::SatUFract:
3285     case BuiltinType::SatULongFract:
3286     case BuiltinType::BFloat16:
3287       return false;
3288 
3289     case BuiltinType::Dependent:
3290 #define BUILTIN_TYPE(Id, SingletonId)
3291 #define PLACEHOLDER_TYPE(Id, SingletonId) \
3292     case BuiltinType::Id:
3293 #include "clang/AST/BuiltinTypes.def"
3294       llvm_unreachable("asking for RRTI for a placeholder type!");
3295 
3296     case BuiltinType::ObjCId:
3297     case BuiltinType::ObjCClass:
3298     case BuiltinType::ObjCSel:
3299       llvm_unreachable("FIXME: Objective-C types are unsupported!");
3300   }
3301 
3302   llvm_unreachable("Invalid BuiltinType Kind!");
3303 }
3304 
3305 static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
3306   QualType PointeeTy = PointerTy->getPointeeType();
3307   const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
3308   if (!BuiltinTy)
3309     return false;
3310 
3311   // Check the qualifiers.
3312   Qualifiers Quals = PointeeTy.getQualifiers();
3313   Quals.removeConst();
3314 
3315   if (!Quals.empty())
3316     return false;
3317 
3318   return TypeInfoIsInStandardLibrary(BuiltinTy);
3319 }
3320 
3321 /// IsStandardLibraryRTTIDescriptor - Returns whether the type
3322 /// information for the given type exists in the standard library.
3323 static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
3324   // Type info for builtin types is defined in the standard library.
3325   if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
3326     return TypeInfoIsInStandardLibrary(BuiltinTy);
3327 
3328   // Type info for some pointer types to builtin types is defined in the
3329   // standard library.
3330   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3331     return TypeInfoIsInStandardLibrary(PointerTy);
3332 
3333   return false;
3334 }
3335 
3336 /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
3337 /// the given type exists somewhere else, and that we should not emit the type
3338 /// information in this translation unit.  Assumes that it is not a
3339 /// standard-library type.
3340 static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
3341                                             QualType Ty) {
3342   ASTContext &Context = CGM.getContext();
3343 
3344   // If RTTI is disabled, assume it might be disabled in the
3345   // translation unit that defines any potential key function, too.
3346   if (!Context.getLangOpts().RTTI) return false;
3347 
3348   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3349     const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
3350     if (!RD->hasDefinition())
3351       return false;
3352 
3353     if (!RD->isDynamicClass())
3354       return false;
3355 
3356     // FIXME: this may need to be reconsidered if the key function
3357     // changes.
3358     // N.B. We must always emit the RTTI data ourselves if there exists a key
3359     // function.
3360     bool IsDLLImport = RD->hasAttr<DLLImportAttr>();
3361 
3362     // Don't import the RTTI but emit it locally.
3363     if (CGM.getTriple().isWindowsGNUEnvironment())
3364       return false;
3365 
3366     if (CGM.getVTables().isVTableExternal(RD)) {
3367       if (CGM.getTarget().hasPS4DLLImportExport())
3368         return true;
3369 
3370       return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
3371                  ? false
3372                  : true;
3373     }
3374     if (IsDLLImport)
3375       return true;
3376   }
3377 
3378   return false;
3379 }
3380 
3381 /// IsIncompleteClassType - Returns whether the given record type is incomplete.
3382 static bool IsIncompleteClassType(const RecordType *RecordTy) {
3383   return !RecordTy->getDecl()->isCompleteDefinition();
3384 }
3385 
3386 /// ContainsIncompleteClassType - Returns whether the given type contains an
3387 /// incomplete class type. This is true if
3388 ///
3389 ///   * The given type is an incomplete class type.
3390 ///   * The given type is a pointer type whose pointee type contains an
3391 ///     incomplete class type.
3392 ///   * The given type is a member pointer type whose class is an incomplete
3393 ///     class type.
3394 ///   * The given type is a member pointer type whoise pointee type contains an
3395 ///     incomplete class type.
3396 /// is an indirect or direct pointer to an incomplete class type.
3397 static bool ContainsIncompleteClassType(QualType Ty) {
3398   if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
3399     if (IsIncompleteClassType(RecordTy))
3400       return true;
3401   }
3402 
3403   if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
3404     return ContainsIncompleteClassType(PointerTy->getPointeeType());
3405 
3406   if (const MemberPointerType *MemberPointerTy =
3407       dyn_cast<MemberPointerType>(Ty)) {
3408     // Check if the class type is incomplete.
3409     const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
3410     if (IsIncompleteClassType(ClassType))
3411       return true;
3412 
3413     return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
3414   }
3415 
3416   return false;
3417 }
3418 
3419 // CanUseSingleInheritance - Return whether the given record decl has a "single,
3420 // public, non-virtual base at offset zero (i.e. the derived class is dynamic
3421 // iff the base is)", according to Itanium C++ ABI, 2.95p6b.
3422 static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
3423   // Check the number of bases.
3424   if (RD->getNumBases() != 1)
3425     return false;
3426 
3427   // Get the base.
3428   CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
3429 
3430   // Check that the base is not virtual.
3431   if (Base->isVirtual())
3432     return false;
3433 
3434   // Check that the base is public.
3435   if (Base->getAccessSpecifier() != AS_public)
3436     return false;
3437 
3438   // Check that the class is dynamic iff the base is.
3439   auto *BaseDecl =
3440       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3441   if (!BaseDecl->isEmpty() &&
3442       BaseDecl->isDynamicClass() != RD->isDynamicClass())
3443     return false;
3444 
3445   return true;
3446 }
3447 
/// BuildVTablePointer - Append to Fields the vtable address point of the
/// __cxxabiv1 type_info subclass appropriate for \p Ty, selecting the
/// subclass from the type's canonical type class.
void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) {
  // abi::__class_type_info.
  static const char * const ClassTypeInfo =
    "_ZTVN10__cxxabiv117__class_type_infoE";
  // abi::__si_class_type_info.
  static const char * const SIClassTypeInfo =
    "_ZTVN10__cxxabiv120__si_class_type_infoE";
  // abi::__vmi_class_type_info.
  static const char * const VMIClassTypeInfo =
    "_ZTVN10__cxxabiv121__vmi_class_type_infoE";

  const char *VTableName = nullptr;

  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    llvm_unreachable("Pipe types shouldn't get here");

  case Type::Builtin:
  case Type::BitInt:
  // GCC treats vector and complex types as fundamental types.
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::Atomic:
  // FIXME: GCC treats block pointers as fundamental types?!
  case Type::BlockPointer:
    // abi::__fundamental_type_info.
    VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // abi::__array_type_info.
    VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // abi::__function_type_info.
    VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
    break;

  case Type::Enum:
    // abi::__enum_type_info.
    VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
    break;

  case Type::Record: {
    // Class types: pick __class_type_info, __si_class_type_info or
    // __vmi_class_type_info depending on the inheritance shape.
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());

    if (!RD->hasDefinition() || !RD->getNumBases()) {
      VTableName = ClassTypeInfo;
    } else if (CanUseSingleInheritance(RD)) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = VMIClassTypeInfo;
    }

    break;
  }

  case Type::ObjCObject:
    // Ignore protocol qualifiers.
    Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();

    // Handle id and Class.
    if (isa<BuiltinType>(Ty)) {
      VTableName = ClassTypeInfo;
      break;
    }

    assert(isa<ObjCInterfaceType>(Ty));
    LLVM_FALLTHROUGH;

  case Type::ObjCInterface:
    if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
      VTableName = SIClassTypeInfo;
    } else {
      VTableName = ClassTypeInfo;
    }
    break;

  case Type::ObjCObjectPointer:
  case Type::Pointer:
    // abi::__pointer_type_info.
    VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
    break;

  case Type::MemberPointer:
    // abi::__pointer_to_member_type_info.
    VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
    break;
  }

  llvm::Constant *VTable = nullptr;

  // Check if the alias exists. If it doesn't, then get or create the global.
  if (CGM.getItaniumVTableContext().isRelativeLayout())
    VTable = CGM.getModule().getNamedAlias(VTableName);
  if (!VTable)
    VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);

  CGM.setDSOLocal(cast<llvm::GlobalValue>(VTable->stripPointerCasts()));

  llvm::Type *PtrDiffTy =
      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());

  // The vtable address point is 2.
  if (CGM.getItaniumVTableContext().isRelativeLayout()) {
    // The vtable address point is 8 bytes after its start:
    // 4 for the offset to top + 4 for the relative offset to rtti.
    llvm::Constant *Eight = llvm::ConstantInt::get(CGM.Int32Ty, 8);
    VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
    VTable =
        llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8Ty, VTable, Eight);
  } else {
    // Classic layout: skip two pointer-sized slots (offset-to-top and the
    // RTTI pointer) to reach the address point.
    llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
    VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable,
                                                          Two);
  }
  VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);

  Fields.push_back(VTable);
}
3591 
3592 /// Return the linkage that the type info and type info name constants
3593 /// should have for the given type.
3594 static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
3595                                                              QualType Ty) {
3596   // Itanium C++ ABI 2.9.5p7:
3597   //   In addition, it and all of the intermediate abi::__pointer_type_info
3598   //   structs in the chain down to the abi::__class_type_info for the
3599   //   incomplete class type must be prevented from resolving to the
3600   //   corresponding type_info structs for the complete class type, possibly
3601   //   by making them local static objects. Finally, a dummy class RTTI is
3602   //   generated for the incomplete type that will not resolve to the final
3603   //   complete class RTTI (because the latter need not exist), possibly by
3604   //   making it a local static object.
3605   if (ContainsIncompleteClassType(Ty))
3606     return llvm::GlobalValue::InternalLinkage;
3607 
3608   switch (Ty->getLinkage()) {
3609   case NoLinkage:
3610   case InternalLinkage:
3611   case UniqueExternalLinkage:
3612     return llvm::GlobalValue::InternalLinkage;
3613 
3614   case VisibleNoLinkage:
3615   case ModuleInternalLinkage:
3616   case ModuleLinkage:
3617   case ExternalLinkage:
3618     // RTTI is not enabled, which means that this type info struct is going
3619     // to be used for exception handling. Give it linkonce_odr linkage.
3620     if (!CGM.getLangOpts().RTTI)
3621       return llvm::GlobalValue::LinkOnceODRLinkage;
3622 
3623     if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
3624       const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
3625       if (RD->hasAttr<WeakAttr>())
3626         return llvm::GlobalValue::WeakODRLinkage;
3627       if (CGM.getTriple().isWindowsItaniumEnvironment())
3628         if (RD->hasAttr<DLLImportAttr>() &&
3629             ShouldUseExternalRTTIDescriptor(CGM, Ty))
3630           return llvm::GlobalValue::ExternalLinkage;
3631       // MinGW always uses LinkOnceODRLinkage for type info.
3632       if (RD->isDynamicClass() &&
3633           !CGM.getContext()
3634                .getTargetInfo()
3635                .getTriple()
3636                .isWindowsGNUEnvironment())
3637         return CGM.getVTableLinkage(RD);
3638     }
3639 
3640     return llvm::GlobalValue::LinkOnceODRLinkage;
3641   }
3642 
3643   llvm_unreachable("Invalid linkage!");
3644 }
3645 
3646 llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
3647   // We want to operate on the canonical type.
3648   Ty = Ty.getCanonicalType();
3649 
3650   // Check if we've already emitted an RTTI descriptor for this type.
3651   SmallString<256> Name;
3652   llvm::raw_svector_ostream Out(Name);
3653   CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
3654 
3655   llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
3656   if (OldGV && !OldGV->isDeclaration()) {
3657     assert(!OldGV->hasAvailableExternallyLinkage() &&
3658            "available_externally typeinfos not yet implemented");
3659 
3660     return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
3661   }
3662 
3663   // Check if there is already an external RTTI descriptor for this type.
3664   if (IsStandardLibraryRTTIDescriptor(Ty) ||
3665       ShouldUseExternalRTTIDescriptor(CGM, Ty))
3666     return GetAddrOfExternalRTTIDescriptor(Ty);
3667 
3668   // Emit the standard library with external linkage.
3669   llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
3670 
3671   // Give the type_info object and name the formal visibility of the
3672   // type itself.
3673   llvm::GlobalValue::VisibilityTypes llvmVisibility;
3674   if (llvm::GlobalValue::isLocalLinkage(Linkage))
3675     // If the linkage is local, only default visibility makes sense.
3676     llvmVisibility = llvm::GlobalValue::DefaultVisibility;
3677   else if (CXXABI.classifyRTTIUniqueness(Ty, Linkage) ==
3678            ItaniumCXXABI::RUK_NonUniqueHidden)
3679     llvmVisibility = llvm::GlobalValue::HiddenVisibility;
3680   else
3681     llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
3682 
3683   llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
3684       llvm::GlobalValue::DefaultStorageClass;
3685   if (auto RD = Ty->getAsCXXRecordDecl()) {
3686     if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
3687          RD->hasAttr<DLLExportAttr>()) ||
3688         (CGM.shouldMapVisibilityToDLLExport(RD) &&
3689          !llvm::GlobalValue::isLocalLinkage(Linkage) &&
3690          llvmVisibility == llvm::GlobalValue::DefaultVisibility))
3691       DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
3692   }
3693   return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
3694 }
3695 
/// Emit the type_info global for \p Ty with the given linkage, visibility and
/// DLL storage class.  The initializer is accumulated in the Fields member:
/// first the vtable pointer, then the name pointer, then any
/// type-class-specific trailing members.  Returns the new global bitcast to
/// i8*.
llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
      QualType Ty,
      llvm::GlobalVariable::LinkageTypes Linkage,
      llvm::GlobalValue::VisibilityTypes Visibility,
      llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass) {
  // Add the vtable pointer.
  BuildVTablePointer(cast<Type>(Ty));

  // And the name.
  llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
  llvm::Constant *TypeNameField;

  // If we're supposed to demote the visibility, be sure to set a flag
  // to use a string comparison for type_info comparisons.
  ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness =
      CXXABI.classifyRTTIUniqueness(Ty, Linkage);
  if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) {
    // The flag is the sign bit, which on ARM64 is defined to be clear
    // for global pointers.  This is very ARM64-specific.
    TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty);
    llvm::Constant *flag =
        llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63);
    TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag);
    TypeNameField =
        llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy);
  } else {
    TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy);
  }
  Fields.push_back(TypeNameField);

  // Append the per-kind trailing members mandated by Itanium C++ ABI 2.9.5.
  switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical and dependent types shouldn't get here");

  // GCC treats vector types as fundamental types.
  case Type::Builtin:
  case Type::Vector:
  case Type::ExtVector:
  case Type::ConstantMatrix:
  case Type::Complex:
  case Type::BlockPointer:
    // Itanium C++ ABI 2.9.5p4:
    // abi::__fundamental_type_info adds no data members to std::type_info.
    break;

  case Type::LValueReference:
  case Type::RValueReference:
    llvm_unreachable("References shouldn't get here");

  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Undeduced type shouldn't get here");

  case Type::Pipe:
    break;

  case Type::BitInt:
    break;

  case Type::ConstantArray:
  case Type::IncompleteArray:
  case Type::VariableArray:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__array_type_info adds no data members to std::type_info.
    break;

  case Type::FunctionNoProto:
  case Type::FunctionProto:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__function_type_info adds no data members to std::type_info.
    break;

  case Type::Enum:
    // Itanium C++ ABI 2.9.5p5:
    // abi::__enum_type_info adds no data members to std::type_info.
    break;

  case Type::Record: {
    const CXXRecordDecl *RD =
      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
    if (!RD->hasDefinition() || !RD->getNumBases()) {
      // We don't need to emit any fields.
      break;
    }

    if (CanUseSingleInheritance(RD))
      BuildSIClassTypeInfo(RD);
    else
      BuildVMIClassTypeInfo(RD);

    break;
  }

  case Type::ObjCObject:
  case Type::ObjCInterface:
    BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
    break;

  case Type::ObjCObjectPointer:
    BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    break;

  case Type::Pointer:
    BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
    break;

  case Type::MemberPointer:
    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
    break;

  case Type::Atomic:
    // No fields, at least for the moment.
    break;
  }

  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  SmallString<256> Name;
  llvm::raw_svector_ostream Out(Name);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  llvm::Module &M = CGM.getModule();
  // A declaration-only global with this name may already exist; it is fetched
  // before creating the definition so its uses can be redirected below.
  llvm::GlobalVariable *OldGV = M.getNamedGlobal(Name);
  llvm::GlobalVariable *GV =
      new llvm::GlobalVariable(M, Init->getType(),
                               /*isConstant=*/true, Linkage, Init, Name);

  // Export the typeinfo in the same circumstances as the vtable is exported.
  auto GVDLLStorageClass = DLLStorageClass;
  if (CGM.getTarget().hasPS4DLLImportExport()) {
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (RD->hasAttr<DLLExportAttr>() ||
          CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
        GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
      }
    }
  }

  // If there's already an old global variable, replace it with the new one.
  if (OldGV) {
    GV->takeName(OldGV);
    llvm::Constant *NewPtr =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtr);
    OldGV->eraseFromParent();
  }

  if (CGM.supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(M.getOrInsertComdat(GV->getName()));

  // Align the descriptor to the target's pointer alignment.
  CharUnits Align =
      CGM.getContext().toCharUnitsFromBits(CGM.getTarget().getPointerAlign(0));
  GV->setAlignment(Align.getAsAlign());

  // The Itanium ABI specifies that type_info objects must be globally
  // unique, with one exception: if the type is an incomplete class
  // type or a (possibly indirect) pointer to one.  That exception
  // affects the general case of comparing type_info objects produced
  // by the typeid operator, which is why the comparison operators on
  // std::type_info generally use the type_info name pointers instead
  // of the object addresses.  However, the language's built-in uses
  // of RTTI generally require class types to be complete, even when
  // manipulating pointers to those class types.  This allows the
  // implementation of dynamic_cast to rely on address equality tests,
  // which is much faster.

  // All of this is to say that it's important that both the type_info
  // object and the type_info name be uniqued when weakly emitted.

  TypeName->setVisibility(Visibility);
  CGM.setDSOLocal(TypeName);

  GV->setVisibility(Visibility);
  CGM.setDSOLocal(GV);

  TypeName->setDLLStorageClass(DLLStorageClass);
  GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
                             ? GVDLLStorageClass
                             : DLLStorageClass);

  TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
  GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
3886 
3887 /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
3888 /// for the given Objective-C object type.
3889 void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
3890   // Drop qualifiers.
3891   const Type *T = OT->getBaseType().getTypePtr();
3892   assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
3893 
3894   // The builtin types are abi::__class_type_infos and don't require
3895   // extra fields.
3896   if (isa<BuiltinType>(T)) return;
3897 
3898   ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
3899   ObjCInterfaceDecl *Super = Class->getSuperClass();
3900 
3901   // Root classes are also __class_type_info.
3902   if (!Super) return;
3903 
3904   QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
3905 
3906   // Everything else is single inheritance.
3907   llvm::Constant *BaseTypeInfo =
3908       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy);
3909   Fields.push_back(BaseTypeInfo);
3910 }
3911 
3912 /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
3913 /// inheritance, according to the Itanium C++ ABI, 2.95p6b.
3914 void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
3915   // Itanium C++ ABI 2.9.5p6b:
3916   // It adds to abi::__class_type_info a single member pointing to the
3917   // type_info structure for the base type,
3918   llvm::Constant *BaseTypeInfo =
3919     ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType());
3920   Fields.push_back(BaseTypeInfo);
3921 }
3922 
namespace {
  /// SeenBases - Contains virtual and non-virtual bases seen when traversing
  /// a class hierarchy.
  struct SeenBases {
    // Non-virtual bases encountered so far during the traversal.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
    // Virtual bases encountered so far during the traversal.
    llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
  };
}
3931 
3932 /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
3933 /// abi::__vmi_class_type_info.
3934 ///
3935 static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
3936                                              SeenBases &Bases) {
3937 
3938   unsigned Flags = 0;
3939 
3940   auto *BaseDecl =
3941       cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
3942 
3943   if (Base->isVirtual()) {
3944     // Mark the virtual base as seen.
3945     if (!Bases.VirtualBases.insert(BaseDecl).second) {
3946       // If this virtual base has been seen before, then the class is diamond
3947       // shaped.
3948       Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped;
3949     } else {
3950       if (Bases.NonVirtualBases.count(BaseDecl))
3951         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3952     }
3953   } else {
3954     // Mark the non-virtual base as seen.
3955     if (!Bases.NonVirtualBases.insert(BaseDecl).second) {
3956       // If this non-virtual base has been seen before, then the class has non-
3957       // diamond shaped repeated inheritance.
3958       Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3959     } else {
3960       if (Bases.VirtualBases.count(BaseDecl))
3961         Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat;
3962     }
3963   }
3964 
3965   // Walk all bases.
3966   for (const auto &I : BaseDecl->bases())
3967     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3968 
3969   return Flags;
3970 }
3971 
3972 static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
3973   unsigned Flags = 0;
3974   SeenBases Bases;
3975 
3976   // Walk all bases.
3977   for (const auto &I : RD->bases())
3978     Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases);
3979 
3980   return Flags;
3981 }
3982 
3983 /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
3984 /// classes with bases that do not satisfy the abi::__si_class_type_info
3985 /// constraints, according ti the Itanium C++ ABI, 2.9.5p5c.
3986 void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
3987   llvm::Type *UnsignedIntLTy =
3988     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
3989 
3990   // Itanium C++ ABI 2.9.5p6c:
3991   //   __flags is a word with flags describing details about the class
3992   //   structure, which may be referenced by using the __flags_masks
3993   //   enumeration. These flags refer to both direct and indirect bases.
3994   unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
3995   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
3996 
3997   // Itanium C++ ABI 2.9.5p6c:
3998   //   __base_count is a word with the number of direct proper base class
3999   //   descriptions that follow.
4000   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
4001 
4002   if (!RD->getNumBases())
4003     return;
4004 
4005   // Now add the base class descriptions.
4006 
4007   // Itanium C++ ABI 2.9.5p6c:
4008   //   __base_info[] is an array of base class descriptions -- one for every
4009   //   direct proper base. Each description is of the type:
4010   //
4011   //   struct abi::__base_class_type_info {
4012   //   public:
4013   //     const __class_type_info *__base_type;
4014   //     long __offset_flags;
4015   //
4016   //     enum __offset_flags_masks {
4017   //       __virtual_mask = 0x1,
4018   //       __public_mask = 0x2,
4019   //       __offset_shift = 8
4020   //     };
4021   //   };
4022 
4023   // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
4024   // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
4025   // LLP64 platforms.
4026   // FIXME: Consider updating libc++abi to match, and extend this logic to all
4027   // LLP64 platforms.
4028   QualType OffsetFlagsTy = CGM.getContext().LongTy;
4029   const TargetInfo &TI = CGM.getContext().getTargetInfo();
4030   if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
4031     OffsetFlagsTy = CGM.getContext().LongLongTy;
4032   llvm::Type *OffsetFlagsLTy =
4033       CGM.getTypes().ConvertType(OffsetFlagsTy);
4034 
4035   for (const auto &Base : RD->bases()) {
4036     // The __base_type member points to the RTTI for the base type.
4037     Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
4038 
4039     auto *BaseDecl =
4040         cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
4041 
4042     int64_t OffsetFlags = 0;
4043 
4044     // All but the lower 8 bits of __offset_flags are a signed offset.
4045     // For a non-virtual base, this is the offset in the object of the base
4046     // subobject. For a virtual base, this is the offset in the virtual table of
4047     // the virtual base offset for the virtual base referenced (negative).
4048     CharUnits Offset;
4049     if (Base.isVirtual())
4050       Offset =
4051         CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
4052     else {
4053       const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
4054       Offset = Layout.getBaseClassOffset(BaseDecl);
4055     };
4056 
4057     OffsetFlags = uint64_t(Offset.getQuantity()) << 8;
4058 
4059     // The low-order byte of __offset_flags contains flags, as given by the
4060     // masks from the enumeration __offset_flags_masks.
4061     if (Base.isVirtual())
4062       OffsetFlags |= BCTI_Virtual;
4063     if (Base.getAccessSpecifier() == AS_public)
4064       OffsetFlags |= BCTI_Public;
4065 
4066     Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
4067   }
4068 }
4069 
4070 /// Compute the flags for a __pbase_type_info, and remove the corresponding
4071 /// pieces from \p Type.
4072 static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
4073   unsigned Flags = 0;
4074 
4075   if (Type.isConstQualified())
4076     Flags |= ItaniumRTTIBuilder::PTI_Const;
4077   if (Type.isVolatileQualified())
4078     Flags |= ItaniumRTTIBuilder::PTI_Volatile;
4079   if (Type.isRestrictQualified())
4080     Flags |= ItaniumRTTIBuilder::PTI_Restrict;
4081   Type = Type.getUnqualifiedType();
4082 
4083   // Itanium C++ ABI 2.9.5p7:
4084   //   When the abi::__pbase_type_info is for a direct or indirect pointer to an
4085   //   incomplete class type, the incomplete target type flag is set.
4086   if (ContainsIncompleteClassType(Type))
4087     Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
4088 
4089   if (auto *Proto = Type->getAs<FunctionProtoType>()) {
4090     if (Proto->isNothrow()) {
4091       Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
4092       Type = Ctx.getFunctionTypeWithExceptionSpec(Type, EST_None);
4093     }
4094   }
4095 
4096   return Flags;
4097 }
4098 
4099 /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
4100 /// used for pointer types.
4101 void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
4102   // Itanium C++ ABI 2.9.5p7:
4103   //   __flags is a flag word describing the cv-qualification and other
4104   //   attributes of the type pointed to
4105   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4106 
4107   llvm::Type *UnsignedIntLTy =
4108     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4109   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4110 
4111   // Itanium C++ ABI 2.9.5p7:
4112   //  __pointee is a pointer to the std::type_info derivation for the
4113   //  unqualified type being pointed to.
4114   llvm::Constant *PointeeTypeInfo =
4115       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4116   Fields.push_back(PointeeTypeInfo);
4117 }
4118 
4119 /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
4120 /// struct, used for member pointer types.
4121 void
4122 ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
4123   QualType PointeeTy = Ty->getPointeeType();
4124 
4125   // Itanium C++ ABI 2.9.5p7:
4126   //   __flags is a flag word describing the cv-qualification and other
4127   //   attributes of the type pointed to.
4128   unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
4129 
4130   const RecordType *ClassType = cast<RecordType>(Ty->getClass());
4131   if (IsIncompleteClassType(ClassType))
4132     Flags |= PTI_ContainingClassIncomplete;
4133 
4134   llvm::Type *UnsignedIntLTy =
4135     CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
4136   Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
4137 
4138   // Itanium C++ ABI 2.9.5p7:
4139   //   __pointee is a pointer to the std::type_info derivation for the
4140   //   unqualified type being pointed to.
4141   llvm::Constant *PointeeTypeInfo =
4142       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
4143   Fields.push_back(PointeeTypeInfo);
4144 
4145   // Itanium C++ ABI 2.9.5p9:
4146   //   __context is a pointer to an abi::__class_type_info corresponding to the
4147   //   class type containing the member pointed to
4148   //   (e.g., the "A" in "int A::*").
4149   Fields.push_back(
4150       ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
4151 }
4152 
4153 llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
4154   return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
4155 }
4156 
/// Emit RTTI descriptors, with external linkage, for every fundamental type
/// plus the "T *" and "const T *" variants of each, using visibility and DLL
/// storage derived from \p RD.
void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
  // Types added here must also be added to TypeInfoIsInStandardLibrary.
  QualType FundamentalTypes[] = {
      getContext().VoidTy,             getContext().NullPtrTy,
      getContext().BoolTy,             getContext().WCharTy,
      getContext().CharTy,             getContext().UnsignedCharTy,
      getContext().SignedCharTy,       getContext().ShortTy,
      getContext().UnsignedShortTy,    getContext().IntTy,
      getContext().UnsignedIntTy,      getContext().LongTy,
      getContext().UnsignedLongTy,     getContext().LongLongTy,
      getContext().UnsignedLongLongTy, getContext().Int128Ty,
      getContext().UnsignedInt128Ty,   getContext().HalfTy,
      getContext().FloatTy,            getContext().DoubleTy,
      getContext().LongDoubleTy,       getContext().Float128Ty,
      getContext().Char8Ty,            getContext().Char16Ty,
      getContext().Char32Ty
  };
  // dllexport the descriptors when the class is dll-exported or its
  // visibility maps to dllexport.
  llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
      RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
          ? llvm::GlobalValue::DLLExportStorageClass
          : llvm::GlobalValue::DefaultStorageClass;
  llvm::GlobalValue::VisibilityTypes Visibility =
      CodeGenModule::GetLLVMVisibility(RD->getVisibility());
  for (const QualType &FundamentalType : FundamentalTypes) {
    // Emit descriptors for T, T*, and const T*.
    QualType PointerType = getContext().getPointerType(FundamentalType);
    QualType PointerTypeConst = getContext().getPointerType(
        FundamentalType.withConst());
    for (QualType Type : {FundamentalType, PointerType, PointerTypeConst})
      ItaniumRTTIBuilder(*this).BuildTypeInfo(
          Type, llvm::GlobalValue::ExternalLinkage,
          Visibility, DLLStorageClass);
  }
}
4190 
4191 /// What sort of uniqueness rules should we use for the RTTI for the
4192 /// given type?
4193 ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness(
4194     QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const {
4195   if (shouldRTTIBeUnique())
4196     return RUK_Unique;
4197 
4198   // It's only necessary for linkonce_odr or weak_odr linkage.
4199   if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage &&
4200       Linkage != llvm::GlobalValue::WeakODRLinkage)
4201     return RUK_Unique;
4202 
4203   // It's only necessary with default visibility.
4204   if (CanTy->getVisibility() != DefaultVisibility)
4205     return RUK_Unique;
4206 
4207   // If we're not required to publish this symbol, hide it.
4208   if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
4209     return RUK_NonUniqueHidden;
4210 
4211   // If we're required to publish this symbol, as we might be under an
4212   // explicit instantiation, leave it with default visibility but
4213   // enable string-comparisons.
4214   assert(Linkage == llvm::GlobalValue::WeakODRLinkage);
4215   return RUK_NonUniqueVisible;
4216 }
4217 
// Find out how to codegen the complete destructor and constructor
namespace {
// Emit: emit a separate body for this variant.  RAUW: record a replacement of
// this variant's symbol with the base variant.  Alias: emit an IR alias to
// the base variant.  COMDAT: emit the body in a shared comdat group.
enum class StructorCodegen { Emit, RAUW, Alias, COMDAT };
}
4222 static StructorCodegen getCodegenToUse(CodeGenModule &CGM,
4223                                        const CXXMethodDecl *MD) {
4224   if (!CGM.getCodeGenOpts().CXXCtorDtorAliases)
4225     return StructorCodegen::Emit;
4226 
4227   // The complete and base structors are not equivalent if there are any virtual
4228   // bases, so emit separate functions.
4229   if (MD->getParent()->getNumVBases())
4230     return StructorCodegen::Emit;
4231 
4232   GlobalDecl AliasDecl;
4233   if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
4234     AliasDecl = GlobalDecl(DD, Dtor_Complete);
4235   } else {
4236     const auto *CD = cast<CXXConstructorDecl>(MD);
4237     AliasDecl = GlobalDecl(CD, Ctor_Complete);
4238   }
4239   llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);
4240 
4241   if (llvm::GlobalValue::isDiscardableIfUnused(Linkage))
4242     return StructorCodegen::RAUW;
4243 
4244   // FIXME: Should we allow available_externally aliases?
4245   if (!llvm::GlobalAlias::isValidLinkage(Linkage))
4246     return StructorCodegen::RAUW;
4247 
4248   if (llvm::GlobalValue::isWeakForLinker(Linkage)) {
4249     // Only ELF and wasm support COMDATs with arbitrary names (C5/D5).
4250     if (CGM.getTarget().getTriple().isOSBinFormatELF() ||
4251         CGM.getTarget().getTriple().isOSBinFormatWasm())
4252       return StructorCodegen::COMDAT;
4253     return StructorCodegen::Emit;
4254   }
4255 
4256   return StructorCodegen::Alias;
4257 }
4258 
/// Emit \p AliasDecl as an IR alias of \p TargetDecl's definition, redirecting
/// any existing declaration with the alias's mangled name to the new alias.
static void emitConstructorDestructorAlias(CodeGenModule &CGM,
                                           GlobalDecl AliasDecl,
                                           GlobalDecl TargetDecl) {
  llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl);

  StringRef MangledName = CGM.getMangledName(AliasDecl);
  llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName);
  // If the symbol is already defined, there is nothing left to do.
  if (Entry && !Entry->isDeclaration())
    return;

  auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl));

  // Create the alias with no name.
  auto *Alias = llvm::GlobalAlias::create(Linkage, "", Aliasee);

  // Constructors and destructors are always unnamed_addr.
  Alias->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Switch any previous uses to the alias.
  if (Entry) {
    assert(Entry->getType() == Aliasee->getType() &&
           "declaration exists with different type");
    // Take over the existing declaration's name, then retire it.
    Alias->takeName(Entry);
    Entry->replaceAllUsesWith(Alias);
    Entry->eraseFromParent();
  } else {
    Alias->setName(MangledName);
  }

  // Finally, set up the alias with its proper name and attributes.
  CGM.SetCommonAttributes(AliasDecl, Alias);
}
4291 
/// Emit the constructor or destructor variant identified by \p GD, reusing
/// the base variant via an alias or use-replacement when possible.
void ItaniumCXXABI::emitCXXStructor(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());
  auto *CD = dyn_cast<CXXConstructorDecl>(MD);
  // Exactly one of CD / DD is non-null.
  const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD);

  StructorCodegen CGType = getCodegenToUse(CGM, MD);

  // For complete-object variants, try to reuse the base variant instead of
  // emitting a second body.
  if (CD ? GD.getCtorType() == Ctor_Complete
         : GD.getDtorType() == Dtor_Complete) {
    GlobalDecl BaseDecl;
    if (CD)
      BaseDecl = GD.getWithCtorType(Ctor_Base);
    else
      BaseDecl = GD.getWithDtorType(Dtor_Base);

    if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) {
      emitConstructorDestructorAlias(CGM, GD, BaseDecl);
      return;
    }

    if (CGType == StructorCodegen::RAUW) {
      // Record that uses of the complete variant should be replaced with the
      // base variant's address.
      StringRef MangledName = CGM.getMangledName(GD);
      auto *Aliasee = CGM.GetAddrOfGlobal(BaseDecl);
      CGM.addReplacement(MangledName, Aliasee);
      return;
    }
  }

  // The base destructor is equivalent to the base destructor of its
  // base class if there is exactly one non-virtual base class with a
  // non-trivial destructor, there are no fields with a non-trivial
  // destructor, and the body of the destructor is trivial.
  if (DD && GD.getDtorType() == Dtor_Base &&
      CGType != StructorCodegen::COMDAT &&
      !CGM.TryEmitBaseDestructorAsAlias(DD))
    return;

  // FIXME: The deleting destructor is equivalent to the selected operator
  // delete if:
  //  * either the delete is a destroying operator delete or the destructor
  //    would be trivial if it weren't virtual,
  //  * the conversion from the 'this' parameter to the first parameter of the
  //    destructor is equivalent to a bitcast,
  //  * the destructor does not have an implicit "this" return, and
  //  * the operator delete has the same calling convention and IR function type
  //    as the destructor.
  // In such cases we should try to emit the deleting dtor as an alias to the
  // selected 'operator delete'.

  llvm::Function *Fn = CGM.codegenCXXStructor(GD);

  if (CGType == StructorCodegen::COMDAT) {
    // Place the body in a comdat group keyed on the C5/D5 comdat mangling so
    // both variants share one group.
    SmallString<256> Buffer;
    llvm::raw_svector_ostream Out(Buffer);
    if (DD)
      getMangleContext().mangleCXXDtorComdat(DD, Out);
    else
      getMangleContext().mangleCXXCtorComdat(CD, Out);
    llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str());
    Fn->setComdat(C);
  } else {
    CGM.maybeSetTrivialComdat(*MD, *Fn);
  }
}
4356 
4357 static llvm::FunctionCallee getBeginCatchFn(CodeGenModule &CGM) {
4358   // void *__cxa_begin_catch(void*);
4359   llvm::FunctionType *FTy = llvm::FunctionType::get(
4360       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4361 
4362   return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
4363 }
4364 
4365 static llvm::FunctionCallee getEndCatchFn(CodeGenModule &CGM) {
4366   // void __cxa_end_catch();
4367   llvm::FunctionType *FTy =
4368       llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4369 
4370   return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
4371 }
4372 
4373 static llvm::FunctionCallee getGetExceptionPtrFn(CodeGenModule &CGM) {
4374   // void *__cxa_get_exception_ptr(void*);
4375   llvm::FunctionType *FTy = llvm::FunctionType::get(
4376       CGM.Int8PtrTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4377 
4378   return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
4379 }
4380 
4381 namespace {
4382   /// A cleanup to call __cxa_end_catch.  In many cases, the caught
4383   /// exception type lets us state definitively that the thrown exception
4384   /// type does not have a destructor.  In particular:
4385   ///   - Catch-alls tell us nothing, so we have to conservatively
4386   ///     assume that the thrown exception might have a destructor.
4387   ///   - Catches by reference behave according to their base types.
4388   ///   - Catches of non-record types will only trigger for exceptions
4389   ///     of non-record types, which never have destructors.
4390   ///   - Catches of record types can trigger for arbitrary subclasses
4391   ///     of the caught type, so we have to assume the actual thrown
4392   ///     exception type might have a throwing destructor, even if the
4393   ///     caught type's destructor is trivial or nothrow.
4394   struct CallEndCatch final : EHScopeStack::Cleanup {
4395     CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
4396     bool MightThrow;
4397 
4398     void Emit(CodeGenFunction &CGF, Flags flags) override {
4399       if (!MightThrow) {
4400         CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM));
4401         return;
4402       }
4403 
4404       CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM));
4405     }
4406   };
4407 }
4408 
4409 /// Emits a call to __cxa_begin_catch and enters a cleanup to call
4410 /// __cxa_end_catch.
4411 ///
4412 /// \param EndMightThrow - true if __cxa_end_catch might throw
4413 static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
4414                                    llvm::Value *Exn,
4415                                    bool EndMightThrow) {
4416   llvm::CallInst *call =
4417     CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn);
4418 
4419   CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
4420 
4421   return call;
4422 }
4423 
/// A "special initializer" callback for initializing a catch
/// parameter during catch initialization.
///
/// \param CatchParam - the declaration from the handler's
///        exception-declaration
/// \param ParamAddr - the storage backing that declaration
/// \param Loc - source location used when emitting loads/stores
static void InitCatchParam(CodeGenFunction &CGF,
                           const VarDecl &CatchParam,
                           Address ParamAddr,
                           SourceLocation Loc) {
  // Load the exception from where the landing pad saved it.
  llvm::Value *Exn = CGF.getExceptionFromSlot();

  CanQualType CatchType =
    CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
  llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);

  // If we're catching by reference, we can just cast the object
  // pointer to the appropriate pointer.
  if (isa<ReferenceType>(CatchType)) {
    QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
    // Only record types can have a destructor for __cxa_end_catch to run.
    bool EndCatchMightThrow = CaughtType->isRecordType();

    // __cxa_begin_catch returns the adjusted object pointer.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);

    // We have no way to tell the personality function that we're
    // catching by reference, so if we're catching a pointer,
    // __cxa_begin_catch will actually return that pointer by value.
    if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
      QualType PointeeType = PT->getPointeeType();

      // When catching by reference, generally we should just ignore
      // this by-value pointer and use the exception object instead.
      if (!PointeeType->isRecordType()) {

        // Exn points to the struct _Unwind_Exception header, which
        // we have to skip past in order to reach the exception data.
        unsigned HeaderSize =
          CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
        AdjustedExn =
            CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);

      // However, if we're catching a pointer-to-record type that won't
      // work, because the personality function might have adjusted
      // the pointer.  There's actually no way for us to fully satisfy
      // the language/ABI contract here:  we can't use Exn because it
      // might have the wrong adjustment, but we can't use the by-value
      // pointer because it's off by a level of abstraction.
      //
      // The current solution is to dump the adjusted pointer into an
      // alloca, which breaks language semantics (because changing the
      // pointer doesn't change the exception) but at least works.
      // The better solution would be to filter out non-exact matches
      // and rethrow them, but this is tricky because the rethrow
      // really needs to be catchable by other sites at this landing
      // pad.  The best solution is to fix the personality function.
      } else {
        // Pull the pointer for the reference type off.
        llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);

        // Create the temporary and write the adjusted pointer into it.
        Address ExnPtrTmp =
          CGF.CreateTempAlloca(PtrTy, CGF.getPointerAlign(), "exn.byref.tmp");
        llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
        CGF.Builder.CreateStore(Casted, ExnPtrTmp);

        // Bind the reference to the temporary.
        AdjustedExn = ExnPtrTmp.getPointer();
      }
    }

    // Store the (possibly adjusted) object pointer as the reference's value.
    llvm::Value *ExnCast =
      CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
    CGF.Builder.CreateStore(ExnCast, ParamAddr);
    return;
  }

  // Scalars and complexes.
  TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType);
  if (TEK != TEK_Aggregate) {
    // Non-record types never have destructors (see CallEndCatch above),
    // so __cxa_end_catch cannot throw here.
    llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);

    // If the catch type is a pointer type, __cxa_begin_catch returns
    // the pointer by value.
    if (CatchType->hasPointerRepresentation()) {
      llvm::Value *CastExn =
        CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");

      // Honor any ObjC ownership qualifier when initializing the
      // parameter from the caught pointer.
      switch (CatchType.getQualifiers().getObjCLifetime()) {
      case Qualifiers::OCL_Strong:
        CastExn = CGF.EmitARCRetainNonBlock(CastExn);
        LLVM_FALLTHROUGH;

      case Qualifiers::OCL_None:
      case Qualifiers::OCL_ExplicitNone:
      case Qualifiers::OCL_Autoreleasing:
        CGF.Builder.CreateStore(CastExn, ParamAddr);
        return;

      case Qualifiers::OCL_Weak:
        CGF.EmitARCInitWeak(ParamAddr, CastExn);
        return;
      }
      llvm_unreachable("bad ownership qualifier!");
    }

    // Otherwise, it returns a pointer into the exception object.

    llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
    llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);

    // Load the value out of the exception object and store it into the
    // parameter's storage.
    LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType);
    LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType);
    switch (TEK) {
    case TEK_Complex:
      CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
                             /*init*/ true);
      return;
    case TEK_Scalar: {
      llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
      CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      llvm_unreachable("evaluation kind filtered out!");
    }
    llvm_unreachable("bad evaluation kind");
  }

  // Aggregates: the parameter is copy-constructed from the exception object.
  assert(isa<RecordType>(CatchType) && "unexpected catch type!");
  auto catchRD = CatchType->getAsCXXRecordDecl();
  CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);

  llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok

  // Check for a copy expression.  If we don't have a copy expression,
  // that means a trivial copy is okay.
  const Expr *copyExpr = CatchParam.getInit();
  if (!copyExpr) {
    llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
    Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                        LLVMCatchTy, caughtExnAlignment);
    LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
    LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
    CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
    return;
  }

  // We have to call __cxa_get_exception_ptr to get the adjusted
  // pointer before copying.
  llvm::CallInst *rawAdjustedExn =
    CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn);

  // Cast that to the appropriate type.
  Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
                      LLVMCatchTy, caughtExnAlignment);

  // The copy expression is defined in terms of an OpaqueValueExpr.
  // Find it and map it to the adjusted expression.
  CodeGenFunction::OpaqueValueMapping
    opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
           CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));

  // Call the copy ctor in a terminate scope: an exception escaping the
  // copy construction at this point must terminate.
  CGF.EHStack.pushTerminate();

  // Perform the copy construction.
  CGF.EmitAggExpr(copyExpr,
                  AggValueSlot::forAddr(ParamAddr, Qualifiers(),
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        AggValueSlot::DoesNotOverlap));

  // Leave the terminate scope.
  CGF.EHStack.popTerminate();

  // Undo the opaque value mapping.
  opaque.pop();

  // Finally we can call __cxa_begin_catch.
  CallBeginCatch(CGF, Exn, true);
}
4604 
4605 /// Begins a catch statement by initializing the catch variable and
4606 /// calling __cxa_begin_catch.
4607 void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4608                                    const CXXCatchStmt *S) {
4609   // We have to be very careful with the ordering of cleanups here:
4610   //   C++ [except.throw]p4:
4611   //     The destruction [of the exception temporary] occurs
4612   //     immediately after the destruction of the object declared in
4613   //     the exception-declaration in the handler.
4614   //
4615   // So the precise ordering is:
4616   //   1.  Construct catch variable.
4617   //   2.  __cxa_begin_catch
4618   //   3.  Enter __cxa_end_catch cleanup
4619   //   4.  Enter dtor cleanup
4620   //
4621   // We do this by using a slightly abnormal initialization process.
4622   // Delegation sequence:
4623   //   - ExitCXXTryStmt opens a RunCleanupsScope
4624   //     - EmitAutoVarAlloca creates the variable and debug info
4625   //       - InitCatchParam initializes the variable from the exception
4626   //       - CallBeginCatch calls __cxa_begin_catch
4627   //       - CallBeginCatch enters the __cxa_end_catch cleanup
4628   //     - EmitAutoVarCleanups enters the variable destructor cleanup
4629   //   - EmitCXXTryStmt emits the code for the catch body
4630   //   - EmitCXXTryStmt close the RunCleanupsScope
4631 
4632   VarDecl *CatchParam = S->getExceptionDecl();
4633   if (!CatchParam) {
4634     llvm::Value *Exn = CGF.getExceptionFromSlot();
4635     CallBeginCatch(CGF, Exn, true);
4636     return;
4637   }
4638 
4639   // Emit the local.
4640   CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
4641   InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getBeginLoc());
4642   CGF.EmitAutoVarCleanups(var);
4643 }
4644 
4645 /// Get or define the following function:
4646 ///   void @__clang_call_terminate(i8* %exn) nounwind noreturn
4647 /// This code is used only in C++.
4648 static llvm::FunctionCallee getClangCallTerminateFn(CodeGenModule &CGM) {
4649   llvm::FunctionType *fnTy =
4650     llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*isVarArg=*/false);
4651   llvm::FunctionCallee fnRef = CGM.CreateRuntimeFunction(
4652       fnTy, "__clang_call_terminate", llvm::AttributeList(), /*Local=*/true);
4653   llvm::Function *fn =
4654       cast<llvm::Function>(fnRef.getCallee()->stripPointerCasts());
4655   if (fn->empty()) {
4656     fn->setDoesNotThrow();
4657     fn->setDoesNotReturn();
4658 
4659     // What we really want is to massively penalize inlining without
4660     // forbidding it completely.  The difference between that and
4661     // 'noinline' is negligible.
4662     fn->addFnAttr(llvm::Attribute::NoInline);
4663 
4664     // Allow this function to be shared across translation units, but
4665     // we don't want it to turn into an exported symbol.
4666     fn->setLinkage(llvm::Function::LinkOnceODRLinkage);
4667     fn->setVisibility(llvm::Function::HiddenVisibility);
4668     if (CGM.supportsCOMDAT())
4669       fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName()));
4670 
4671     // Set up the function.
4672     llvm::BasicBlock *entry =
4673         llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn);
4674     CGBuilderTy builder(CGM, entry);
4675 
4676     // Pull the exception pointer out of the parameter list.
4677     llvm::Value *exn = &*fn->arg_begin();
4678 
4679     // Call __cxa_begin_catch(exn).
4680     llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn);
4681     catchCall->setDoesNotThrow();
4682     catchCall->setCallingConv(CGM.getRuntimeCC());
4683 
4684     // Call std::terminate().
4685     llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn());
4686     termCall->setDoesNotThrow();
4687     termCall->setDoesNotReturn();
4688     termCall->setCallingConv(CGM.getRuntimeCC());
4689 
4690     // std::terminate cannot return.
4691     builder.CreateUnreachable();
4692   }
4693   return fnRef;
4694 }
4695 
4696 llvm::CallInst *
4697 ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4698                                                    llvm::Value *Exn) {
4699   // In C++, we want to call __cxa_begin_catch() before terminating.
4700   if (Exn) {
4701     assert(CGF.CGM.getLangOpts().CPlusPlus);
4702     return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn);
4703   }
4704   return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn());
4705 }
4706 
4707 std::pair<llvm::Value *, const CXXRecordDecl *>
4708 ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
4709                              const CXXRecordDecl *RD) {
4710   return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
4711 }
4712 
4713 void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
4714                                        const CXXCatchStmt *C) {
4715   if (CGF.getTarget().hasFeature("exception-handling"))
4716     CGF.EHStack.pushCleanup<CatchRetScope>(
4717         NormalCleanup, cast<llvm::CatchPadInst>(CGF.CurrentFuncletPad));
4718   ItaniumCXXABI::emitBeginCatch(CGF, C);
4719 }
4720 
4721 llvm::CallInst *
4722 WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
4723                                                        llvm::Value *Exn) {
4724   // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
4725   // the violating exception to mark it handled, but it is currently hard to do
4726   // with wasm EH instruction structure with catch/catch_all, we just call
4727   // std::terminate and ignore the violating exception as in CGCXXABI.
4728   // TODO Consider code transformation that makes calling __clang_call_terminate
4729   // possible.
4730   return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
4731 }
4732 
4733 /// Register a global destructor as best as we know how.
4734 void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
4735                                   llvm::FunctionCallee Dtor,
4736                                   llvm::Constant *Addr) {
4737   if (D.getTLSKind() != VarDecl::TLS_None) {
4738     // atexit routine expects "int(*)(int,...)"
4739     llvm::FunctionType *FTy =
4740         llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
4741     llvm::PointerType *FpTy = FTy->getPointerTo();
4742 
4743     // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
4744     llvm::FunctionType *AtExitTy =
4745         llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
4746 
4747     // Fetch the actual function.
4748     llvm::FunctionCallee AtExit =
4749         CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
4750 
4751     // Create __dtor function for the var decl.
4752     llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
4753 
4754     // Register above __dtor with atexit().
4755     // First param is flags and must be 0, second param is function ptr
4756     llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
4757     CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
4758 
4759     // Cannot unregister TLS __dtor so done
4760     return;
4761   }
4762 
4763   // Create __dtor function for the var decl.
4764   llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
4765 
4766   // Register above __dtor with atexit().
4767   CGF.registerGlobalDtorWithAtExit(DtorStub);
4768 
4769   // Emit __finalize function to unregister __dtor and (as appropriate) call
4770   // __dtor.
4771   emitCXXStermFinalizer(D, DtorStub, Addr);
4772 }
4773 
/// Emit the sterm finalizer for variable D: a nullary function that calls
/// unatexit() on the previously registered dtorStub and, if the cleanup was
/// still pending (unatexit returned 0), invokes dtorStub itself.  The
/// finalizer is then queued according to the variable's priority/linkage.
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
                                     llvm::Constant *addr) {
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
  // Mangle the finalizer's name from the variable's declaration.
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getMangleContext().mangleDynamicStermFinalizer(&D, Out);
  }

  // Create the finalization action associated with a variable.
  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *StermFinalizer = CGM.CreateGlobalInitOrCleanUpFunction(
      FTy, FnName.str(), FI, D.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, StermFinalizer, FI,
                    FunctionArgList(), D.getLocation(),
                    D.getInit()->getExprLoc());

  // The unatexit subroutine unregisters __dtor functions that were previously
  // registered by the atexit subroutine. If the referenced function is found,
  // the unatexit returns a value of 0, meaning that the cleanup is still
  // pending (and we should call the __dtor function).
  llvm::Value *V = CGF.unregisterGlobalDtorWithUnAtExit(dtorStub);

  llvm::Value *NeedsDestruct = CGF.Builder.CreateIsNull(V, "needs_destruct");

  llvm::BasicBlock *DestructCallBlock = CGF.createBasicBlock("destruct.call");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("destruct.end");

  // Check if unatexit returns a value of 0. If it does, jump to
  // DestructCallBlock, otherwise jump to EndBlock directly.
  CGF.Builder.CreateCondBr(NeedsDestruct, DestructCallBlock, EndBlock);

  CGF.EmitBlock(DestructCallBlock);

  // Emit the call to dtorStub.
  llvm::CallInst *CI = CGF.Builder.CreateCall(dtorStub);

  // Make sure the call and the callee agree on calling convention.
  CI->setCallingConv(dtorStub->getCallingConv());

  CGF.EmitBlock(EndBlock);

  CGF.FinishFunction();

  // Decide how this finalizer is run at termination time.
  if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
    // An explicit init_priority attribute uses the prioritized list.
    CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
                                             IPA->getPriority());
  } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
    // According to C++ [basic.start.init]p2, class template static data
    // members (i.e., implicitly or explicitly instantiated specializations)
    // have unordered initialization. As a consequence, we can put them into
    // their own llvm.global_dtors entry.
    CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
  } else {
    CGM.AddCXXStermFinalizerEntry(StermFinalizer);
  }
}
4835