//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type.  Some of the lowering is straightforward, some is not.  Here we
/// detail some of the complexities and weirdnesses.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size.  We choose a field via a specific
///   heuristic and add padding if necessary (see the union example at the end
///   of this comment).
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run.  ASTRecordLayout
///   contains enough information to determine where the runs break.  Microsoft
///   and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types.  For example, unsigned x : 24 gets lowered to
///   i24.  This isn't always possible because i24 has a storage size of 32
///   bits, and if it is possible to use that extra byte of padding we must use
///   [3 x i8] instead of i24.  This is computed when accumulating bitfields in
///   accumulateBitFields.
///   C++ examples that require clipping:
///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///   struct A { int a : 24; ~A(); }; // a must be clipped because:
///   struct B : A { char b; }; // b goes at offset 3
/// * The allocation of bitfield access units is described in more detail in
///   CGRecordLowering::accumulateBitFields.
/// * Clang ignores zero-sized bitfields and zero-sized bases but *not*
///   zero-sized fields.  The existing asserts suggest that LLVM assumes that
///   *every* field has an underlying storage type.  Therefore empty structures
///   containing zero-sized subobjects such as empty records or zero-sized
///   arrays still get a zero-sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields.  Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class).  However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout.
/// * Itanium allows nearly empty primary virtual bases.  These bases don't
///   get their own storage because they're laid out as part of another base
///   or at the beginning of the structure.  Determining if a VBase actually
///   gets storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
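/// * Union example (an illustrative sketch, not a normative rule): for
///   union U { char c[5]; int i; }, the heuristic below prefers the
///   highest-aligned, then largest, zero-initializable member, so i (i32) is
///   chosen and the remaining bytes become explicit padding, giving roughly
///   { i32, [4 x i8] } for the 8-byte union.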
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member.  In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase } Kind;
    llvm::Type *Data;
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
      : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
      : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
  static MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type.  We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// contiguous run.
  bool isDiscreteBitFieldABI() const {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }

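  // A sketch of the discrete-storage rule (hypothetical struct):
  //   struct __attribute__((ms_struct)) S { short a : 3, b : 3; int c : 3; };
  // a and b share one short-sized storage unit; c starts a new int-sized
  // unit because its formal type differs.
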
  /// Helper function to check if we are targeting AAPCS.
  bool isAAPCS() const {
    return Context.getTargetInfo().getABI().starts_with("aapcs");
  }

  /// Helper function to check if the target machine is BigEndian.
  bool isBE() const { return Context.getTargetInfo().isBigEndian(); }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() const {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
  llvm::Type *getIntNType(uint64_t NumBits) const {
    unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
    return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
  }
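  // E.g., with 8-bit chars, getIntNType(17) rounds up to the next char
  // multiple and returns i24.
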
  /// Get the LLVM type sized as one character unit.
  llvm::Type *getCharType() const {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 Context.getCharWidth());
  }
  /// Gets an llvm type of size NumChars and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumChars) const {
    assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = getCharType();
    return NumChars == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
  llvm::Type *getStorageType(const FieldDecl *FD) const {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                             (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm BaseSubobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) const {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) const {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) const {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) const {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlign(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) const {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) const {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) const {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion(bool isNoUniqueAddress);
  void accumulateFields(bool isNonVirtualBaseType);
  RecordDecl::field_iterator
  accumulateBitFields(bool isNonVirtualBaseType,
                      RecordDecl::field_iterator Field,
                      RecordDecl::field_iterator FieldEnd);
  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl,
                     const CXXRecordDecl *Query) const;
  void calculateZeroInit();
  CharUnits calculateTailClippingOffset(bool isNonVirtualBaseType) const;
  void checkBitfieldClipping(bool isNonVirtualBaseType) const;
  /// Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();
  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace

CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
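  // Worked example of the reversal: with StorageSize = 32, Offset = 8 and
  // Size = 8, the field occupies bits [8, 16) from the LSB on little-endian;
  // on big-endian the same bits sit at Offset = 32 - (8 + 8) = 16.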

  Info.VolatileStorageSize = 0;
  Info.VolatileOffset = 0;
  Info.VolatileStorageOffset = CharUnits::Zero();
}

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base.  The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing.  It's important that this
  //    phase occur after clipping, because clipping changes the llvm type.
  //    This phase reads the offset of the capstone when determining packedness
  //    and updates the alignment of the capstone to be equal to the alignment
  //    of the record after doing so.
  // 5) Insert padding everywhere it is needed.  This phase requires 'Packed' to
  //    have been computed and needs to know the alignment of the record in
  //    order to understand if explicit tail padding is needed.
  // 6) Remove the capstone; we don't need it anymore.
  // 7) Determine if this record can be zero-initialized.  This phase could have
  //    been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion()) {
    lowerUnion(NVBaseType);
    computeVolatileBitfields();
    return;
  }
  accumulateFields(NVBaseType);
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty()) {
      appendPaddingBytes(Size);
      computeVolatileBitfields();
      return;
    }
    if (!NVBaseType)
      accumulateVBases();
  }
  llvm::stable_sort(Members);
  checkBitfieldClipping(NVBaseType);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}

void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
  CharUnits LayoutSize =
      isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.  The heuristic for finding the
  // storage type isn't necessary: the first (non-zero-length-bitfield) field's
  // type would work fine and be simpler, but it would differ from what we've
  // been doing and cause lit tests to change.
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // a data member, which might have some exotic initialization sequence.
    // If this is the case, then we ought not try to come up with a "better"
    // type; it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) >  getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
        getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an i8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  const auto StorageAlignment = getAlignment(StorageType);
  assert((Layout.getSize() % StorageAlignment == 0 ||
          Layout.getDataSize() % StorageAlignment) &&
         "Union's standard layout and no_unique_address layout must agree on "
         "packedness");
  if (Layout.getDataSize() % StorageAlignment)
    Packed = true;
}

void CGRecordLowering::accumulateFields(bool isNonVirtualBaseType) {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
       Field != FieldEnd;) {
    if (Field->isBitField()) {
      Field = accumulateBitFields(isNonVirtualBaseType, Field, FieldEnd);
      assert((Field == FieldEnd || !Field->isBitField()) &&
             "Failed to accumulate all the bitfields");
    } else if (isEmptyFieldForLayout(Context, *Field)) {
      // Empty fields have no storage.
      ++Field;
    } else {
      // Use base subobject layout for the potentially-overlapping field,
      // as it is done in RecordLayoutBuilder.
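      // For instance (a hypothetical sketch): given
      //   struct Inner { int i : 24; ~Inner(); };
      //   struct Outer { [[no_unique_address]] Inner in; char c; };
      // 'in' is potentially overlapping, so it is given Inner's base
      // subobject type, allowing 'c' to be placed in Inner's tail padding
      // at offset 3.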
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          Field->isPotentiallyOverlapping()
              ? getStorageType(Field->getType()->getAsCXXRecordDecl())
              : getStorageType(*Field),
          *Field));
      ++Field;
    }
  }
}

// Create members for bitfields. Field is a bitfield, and FieldEnd is the end
// iterator of the record. Return the first non-bitfield encountered.  We need
// to know whether this is the base or complete layout, as virtual bases could
// affect the upper bound of bitfield access unit allocation.
RecordDecl::field_iterator
CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
                                      RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  if (isDiscreteBitFieldABI()) {
    // Run stores the first element of the current run of bitfields. FieldEnd is
    // used as a special value to note that we don't have a current run. A
    // bitfield run is a contiguous collection of bitfields that can be stored
    // in the same storage block. Zero-sized bitfields and bitfields that would
    // cross an alignment boundary break a run and start a new one.
    RecordDecl::field_iterator Run = FieldEnd;
    // Tail is the offset of the first bit off the end of the current run. It's
    // used to determine if the ASTRecordLayout is treating these two bitfields
    // as contiguous. StartBitOffset is the offset of the beginning of the Run.
    uint64_t StartBitOffset, Tail = 0;
    for (; Field != FieldEnd && Field->isBitField(); ++Field) {
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      uint64_t BitOffset = getFieldBitOffset(*Field);
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return Field;
  }

  // The SysV ABI can overlap bitfield storage units with both other bitfield
  // storage units /and/ other non-bitfield data members. Accessing a sequence
  // of bitfields mustn't interfere with adjacent non-bitfields -- they're
  // permitted to be accessed in separate threads for instance.

  // We split runs of bit-fields into a sequence of "access units". When we emit
  // a load or store of a bit-field, we'll load/store the entire containing
  // access unit. As mentioned, the standard requires that these loads and
  // stores must not interfere with accesses to other memory locations, and it
  // defines the bit-field's memory location as the current run of
  // non-zero-width bit-fields. So an access unit must never overlap with
  // non-bit-field storage or cross a zero-width bit-field. Otherwise, we're
  // free to draw the lines as we see fit.

  // Drawing these lines well can be complicated. LLVM generally can't modify a
  // program to access memory that it didn't before, so using very narrow access
  // units can prevent the compiler from using optimal access patterns. For
  // example, suppose a run of bit-fields occupies four bytes in a struct. If we
  // split that into four 1-byte access units, then a sequence of assignments
  // that doesn't touch all four bytes may have to be emitted with multiple
  // 8-bit stores instead of a single 32-bit store. On the other hand, if we use
  // very wide access units, we may find ourselves emitting accesses to
  // bit-fields we didn't really need to touch, just because LLVM was unable to
  // clean up after us.

  // It is desirable to have access units be aligned powers of 2 no larger than
  // a register. (On non-strict alignment ISAs, the alignment requirement can be
  // dropped.) A three byte access unit will be accessed using 2-byte and 1-byte
  // accesses and bit manipulation. If no bitfield straddles the two separate
  // accesses, it is better to have separate 2-byte and 1-byte access units, as
  // then LLVM will not generate unnecessary memory accesses, or bit
  // manipulation. Similarly, on a strict-alignment architecture, it is better
  // to keep access units naturally aligned, to avoid similar bit
  // manipulation synthesizing larger unaligned accesses.

  // Bitfields that share parts of a single byte are, of necessity, placed in
  // the same access unit. That unit will encompass a consecutive run where
  // adjacent bitfields share parts of a byte. (The first bitfield of such an
  // access unit will start at the beginning of a byte.)

  // We then try to accumulate adjacent access units when the combined unit is
  // naturally sized, no larger than a register, and (on a strict alignment
  // ISA), naturally aligned. Note that this requires lookahead to one or more
  // subsequent access units. For instance, consider a 2-byte access-unit
  // followed by 2 1-byte units. We can merge that into a 4-byte access-unit,
  // but we would not want to merge a 2-byte followed by a single 1-byte (and no
  // available tail padding). We keep track of the best access unit seen so far,
  // and use that when we determine we cannot accumulate any more. Then we start
  // again at the bitfield following that best one.

  // The accumulation is also prevented when:
  // *) it would cross a character-aligned zero-width bitfield, or
  // *) the fine-grained bitfield access option is in effect.
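
  // As an illustrative sketch (not exhaustive): in
  //   struct S { char a : 4; char b : 4; short c : 16; char d; };
  // a and b share byte 0 and so must share an access unit. Whether that unit
  // is then merged with c into one naturally aligned 32-bit unit depends on
  // the target's register width and unaligned-access cost, as decided below;
  // d is a non-bitfield, so no access unit may extend into its storage.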

  CharUnits RegSize =
      bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
  unsigned CharBits = Context.getCharWidth();

  // Limit of useable tail padding at end of the record. Computed lazily and
  // cached here.
  CharUnits ScissorOffset = CharUnits::Zero();

  // Data about the start of the span we're accumulating to create an access
  // unit from. Begin is the first bitfield of the span. If Begin is FieldEnd,
  // we've not got a current span. The span starts at the BeginOffset character
  // boundary. BitSizeSinceBegin is the size (in bits) of the span -- this might
  // include padding when we've advanced to a subsequent bitfield run.
  RecordDecl::field_iterator Begin = FieldEnd;
  CharUnits BeginOffset;
  uint64_t BitSizeSinceBegin;

  // The (non-inclusive) end of the largest acceptable access unit we've found
  // since Begin. If this is Begin, we're gathering the initial set of bitfields
  // of a new span. BestEndOffset is the end of that acceptable access unit --
  // it might extend beyond the last character of the bitfield run, using
  // available padding characters.
  RecordDecl::field_iterator BestEnd = Begin;
  CharUnits BestEndOffset;
  bool BestClipped; // Whether the representation must be in a byte array.

  for (;;) {
    // AtAlignedBoundary is true iff Field is the (potential) start of a new
    // span (or the end of the bitfields). When true, LimitOffset is the
    // character offset of that span and Barrier indicates whether the new
    // span cannot be merged into the current one.
    bool AtAlignedBoundary = false;
    bool Barrier = false;

    if (Field != FieldEnd && Field->isBitField()) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      if (Begin == FieldEnd) {
        // Beginning a new span.
        Begin = Field;
        BestEnd = Begin;

        assert((BitOffset % CharBits) == 0 && "Not at start of char");
        BeginOffset = bitsToCharUnits(BitOffset);
        BitSizeSinceBegin = 0;
      } else if ((BitOffset % CharBits) != 0) {
        // The bitfield occupies the same character as the previous bitfield,
        // so it must be part of the same span. This can include zero-length
        // bitfields, should the target not align them to character
        // boundaries. Such non-alignment is at variance with the standards,
        // which require zero-length bitfields be a barrier between access
        // units. But of course we can't achieve that in the middle of a
        // character.
        assert(BitOffset == Context.toBits(BeginOffset) + BitSizeSinceBegin &&
               "Concatenating non-contiguous bitfields");
      } else {
        // Bitfield potentially begins a new span. This includes zero-length
        // bitfields on non-aligning targets that lie at character boundaries
        // (those are barriers to merging).
        if (Field->isZeroLengthBitField(Context))
          Barrier = true;
        AtAlignedBoundary = true;
      }
    } else {
      // We've reached the end of the bitfield run. Either we're done, or this
      // is a barrier for the current span.
      if (Begin == FieldEnd)
        break;

      Barrier = true;
      AtAlignedBoundary = true;
    }

    // InstallBest indicates whether we should create an access unit for the
    // current best span: fields [Begin, BestEnd) occupying characters
    // [BeginOffset, BestEndOffset).
    bool InstallBest = false;
    if (AtAlignedBoundary) {
      // Field is the start of a new span or the end of the bitfields. The
      // just-seen span now extends to BitSizeSinceBegin.

      // Determine if we can accumulate that just-seen span into the current
      // accumulation.
      CharUnits AccessSize = bitsToCharUnits(BitSizeSinceBegin + CharBits - 1);
      if (BestEnd == Begin) {
        // This is the initial run at the start of a new span. By definition,
        // this is the best seen so far.
        BestEnd = Field;
        BestEndOffset = BeginOffset + AccessSize;
        // Assume clipped until proven not below.
        BestClipped = true;
        if (!BitSizeSinceBegin)
          // A zero-sized initial span -- this will install nothing and reset
          // for another.
          InstallBest = true;
      } else if (AccessSize > RegSize)
        // Accumulating the just-seen span would create a multi-register access
        // unit, which would increase register pressure.
        InstallBest = true;

      if (!InstallBest) {
        // Determine if accumulating the just-seen span will create an expensive
        // access unit or not.
        llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
        if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
          // Unaligned accesses are expensive. Only accumulate if the new unit
          // is naturally aligned. Otherwise install the best we have, which is
          // either the initial access unit (can't do better), or a naturally
          // aligned accumulation (since we would have already installed it if
          // it wasn't naturally aligned).
          CharUnits Align = getAlignment(Type);
          if (Align > Layout.getAlignment())
            // The alignment required is greater than the containing structure
            // itself.
            InstallBest = true;
          else if (!BeginOffset.isMultipleOf(Align))
            // The access unit is not at a naturally aligned offset within the
            // structure.
            InstallBest = true;

          if (InstallBest && BestEnd == Field)
            // We're installing the first span, whose clipping was presumed
            // above. Compute it correctly.
            if (getSize(Type) == AccessSize)
              BestClipped = false;
        }

        if (!InstallBest) {
          // Find the next used storage offset to determine what the limit of
          // the current span is. That's either the offset of the next field
          // with storage (which might be Field itself) or the end of the
          // non-reusable tail padding.
          CharUnits LimitOffset;
          for (auto Probe = Field; Probe != FieldEnd; ++Probe)
            if (!isEmptyFieldForLayout(Context, *Probe)) {
              // A member with storage sets the limit.
              assert((getFieldBitOffset(*Probe) % CharBits) == 0 &&
                     "Next storage is not byte-aligned");
              LimitOffset = bitsToCharUnits(getFieldBitOffset(*Probe));
              goto FoundLimit;
            }
          // We reached the end of the fields; determine the bounds of useable
          // tail padding. As this can be complex for C++, we cache the result.
          if (ScissorOffset.isZero()) {
            ScissorOffset = calculateTailClippingOffset(isNonVirtualBaseType);
            assert(!ScissorOffset.isZero() && "Tail clipping at zero");
          }

          LimitOffset = ScissorOffset;
        FoundLimit:;

          CharUnits TypeSize = getSize(Type);
          if (BeginOffset + TypeSize <= LimitOffset) {
            // There is space before LimitOffset to create a naturally-sized
            // access unit.
            BestEndOffset = BeginOffset + TypeSize;
            BestEnd = Field;
            BestClipped = false;
          }

          if (Barrier)
            // The next field is a barrier that we cannot merge across.
            InstallBest = true;
          else if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
            // Fine-grained access, so no merging of spans.
            InstallBest = true;
          else
            // Otherwise, we're not installing. Update the bit size
            // of the current span to go all the way to LimitOffset, which is
            // the (aligned) offset of the next bitfield to consider.
            BitSizeSinceBegin = Context.toBits(LimitOffset - BeginOffset);
        }
      }
    }

    if (InstallBest) {
      assert((Field == FieldEnd || !Field->isBitField() ||
              (getFieldBitOffset(*Field) % CharBits) == 0) &&
             "Installing but not at an aligned bitfield or limit");
      CharUnits AccessSize = BestEndOffset - BeginOffset;
      if (!AccessSize.isZero()) {
        // Add the storage member for the access unit to the record. The
        // bitfields get the offset of their storage but come afterward and
        // remain there after a stable sort.
        llvm::Type *Type;
        if (BestClipped) {
          assert(getSize(getIntNType(Context.toBits(AccessSize))) >
                     AccessSize &&
                 "Clipped access need not be clipped");
          Type = getByteArrayType(AccessSize);
        } else {
          Type = getIntNType(Context.toBits(AccessSize));
          assert(getSize(Type) == AccessSize &&
                 "Unclipped access must be clipped");
        }
        Members.push_back(StorageInfo(BeginOffset, Type));
        for (; Begin != BestEnd; ++Begin)
          if (!Begin->isZeroLengthBitField(Context))
            Members.push_back(
                MemberInfo(BeginOffset, MemberInfo::Field, nullptr, *Begin));
      }
      // Reset to start a new span.
      Field = BestEnd;
      Begin = FieldEnd;
    } else {
      assert(Field != FieldEnd && Field->isBitField() &&
             "Accumulating past end of bitfields");
      assert(!Barrier && "Accumulating across barrier");
      // Accumulate this bitfield into the current (potential) span.
      BitSizeSinceBegin += Field->getBitWidthValue(Context);
      ++Field;
    }
  }

  return Field;
}

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!isEmptyRecordForLayout(Context, Base.getType()) &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
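///
/// As an illustrative sketch: given struct S { volatile int f : 8; }, the
/// generic lowering gives f a narrow storage unit, but on an AAPCS target
/// the pass below widens the access to the declared type width (a single
/// 32-bit container), provided that container stays inside the record and
/// overlaps no other member.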
void CGRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
    return;

  for (auto &I : BitFields) {
    const FieldDecl *Field = I.first;
    CGBitFieldInfo &Info = I.second;
    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
    // If the record alignment is less than the type width, we can't enforce
    // an aligned load, so bail out.
    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
        ResLTy->getPrimitiveSizeInBits())
      continue;
    // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // Info.StorageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned OldOffset =
        isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned AbsoluteOffset =
        Context.toBits(Info.StorageOffset) + OldOffset;
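    // Worked example (hypothetical numbers, big-endian target): with
    // Info.StorageSize == 8, Info.Offset == 5 and Info.Size == 3, the
    // pre-adjustment undone above gives OldOffset == 8 - (5 + 3) == 0,
    // i.e. the bit-field starts at the first bit of its container counted
    // from the least-significant end.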

    // Container size is the width of the bit-field type.
    const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
    // Nothing to do if the access uses the desired
    // container width and is naturally aligned.
    if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
      continue;

    // Offset within the container.
    unsigned Offset = AbsoluteOffset & (StorageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases, so we
    // let Clang follow its own rules.
    if (Offset + Info.Size > StorageSize)
      continue;

    // Re-adjust offsets for big-endian targets.
    if (isBE())
      Offset = StorageSize - (Offset + Info.Size);

    const CharUnits StorageOffset =
        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
    const CharUnits End = StorageOffset +
                          Context.toCharUnitsFromBits(StorageSize) -
                          CharUnits::One();

    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(Field->getParent());
    // If the access would touch memory outside the record, bail out.
    const CharUnits RecordSize = Layout.getSize();
    if (End >= RecordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool Conflict = false;
    for (const auto *F : D->fields()) {
      // Overlaps with non-zero-length bit-fields are allowed.
      if (F->isBitField() && !F->isZeroLengthBitField(Context))
        continue;

      const CharUnits FOffset = Context.toCharUnitsFromBits(
          Layout.getFieldOffset(F->getFieldIndex()));

      // As C11 defines, a zero-sized bit-field acts as a barrier, so the
      // fields before and after it should be free of race conditions.
      // The AAPCS acknowledges this and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (F->isZeroLengthBitField(Context)) {
        if (End > FOffset && StorageOffset < FOffset) {
          Conflict = true;
          break;
        }
      }

      const CharUnits FEnd =
          FOffset +
          Context.toCharUnitsFromBits(
              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
          CharUnits::One();
      // If no overlap, continue.
      if (End < FOffset || FEnd < StorageOffset)
        continue;

      // The desired load overlaps a non-bit-field member, bail out.
      Conflict = true;
      break;
    }

    if (Conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset is now defined as the number of elements from the
    // start of the structure, we divide the Offset by the element size.
    Info.VolatileStorageOffset =
        StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
    Info.VolatileStorageSize = StorageSize;
    Info.VolatileOffset = Offset;
  }
}

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(
        MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
                   llvm::PointerType::getUnqual(Types.getLLVMContext())));
  if (Layout.hasOwnVBPtr())
    Members.push_back(
        MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
                   llvm::PointerType::getUnqual(Types.getLLVMContext())));
}
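
// As a sketch (an assumed example, not from this file): under the Microsoft
// ABI a record such as
//   struct B : virtual A { virtual void f(); };
// can carry both a vfptr at offset zero and a vbptr at
// Layout.getVBPtrOffset(), each lowered above as an opaque pointer member.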

CharUnits
CGRecordLowering::calculateTailClippingOffset(bool isNonVirtualBaseType) const {
  if (!RD)
    return Layout.getDataSize();

  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the Itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize.  Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
  if (!isNonVirtualBaseType && isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (isEmptyRecordForLayout(Context, Base.getType()))
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }

  return ScissorOffset;
}
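
// Sketch of the Itanium case handled above (hypothetical hierarchy): if a
// virtual base is laid out at an offset below the non-virtual size, the
// scissor offset is pulled back to that base's offset, so that bit-field
// storage units are never allowed to extend into the space the virtual base
// occupies.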

void CGRecordLowering::accumulateVBases() {
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (isEmptyRecordForLayout(Context, Base.getType()))
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}
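
// Illustrative only: under the Microsoft ABI a virtual base that requires a
// vtordisp gets a hidden i32 placed in the four bytes immediately before the
// base, which is why StorageInfo(Offset - 4, i32) is appended above.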

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) const {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}
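
// A sketch of the recursion above (hypothetical hierarchy): if Query is the
// primary virtual base of Decl, or of any class anywhere in Decl's base
// tree, hasOwnStorage() returns false, because the layout folds such a base
// into the storage of the class it is primary for.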

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}
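
// For illustration (hypothetical types): under the Itanium ABI the null
// pointer to data member is represented as -1 rather than all-zero bits, so
// a record such as
//   struct Holder { int Widget::*p; };
// is not zero-initializable, which the loop above records by clearing
// IsZeroInitializable for the enclosing record.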

// Verify accumulateBitfields computed the correct storage representations.
void CGRecordLowering::checkBitfieldClipping(bool IsNonVirtualBaseType) const {
#ifndef NDEBUG
  auto ScissorOffset = calculateTailClippingOffset(IsNonVirtualBaseType);
  auto Tail = CharUnits::Zero();
  for (const auto &M : Members) {
    // Only members with data could possibly overlap.
    if (!M.Data)
      continue;

    assert(M.Offset >= Tail && "Bitfield access unit is not clipped");
    Tail = M.Offset + getSize(M.Data);
    assert((Tail <= ScissorOffset || M.Offset >= ScissorOffset) &&
           "Bitfield straddles scissor offset");
  }
#endif
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of the
  // record's alignment, it must be packed.
  if (Members.back().Offset % Alignment)
    Packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed.  We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}
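
// Worked example (hypothetical): in a struct declared with
// __attribute__((packed)) that places an i32 member at offset 1, the offset
// is not a multiple of the member's 4-byte alignment, so the loop above
// flags the whole lowered type as packed.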

void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
        Pad = Padding.begin(), PadEnd = Padding.end();
        Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
}
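
// Illustrative sketch: if one member ends at offset 2 and the next stored
// member begins at offset 8, the loop above records the pair (2, 6) and a
// 6-byte i8 array is appended at offset 2; the stable sort then
// re-establishes offset order.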

void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that
  // when addressed will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
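  // Worked example (hypothetical values): with StorageSize == 32,
  // Offset == 0 and Size == 8 on a big-endian target, the stored offset
  // becomes 32 - (0 + 8) == 24, placing the field in the most significant
  // byte of the 32-bit load.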
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}

std::unique_ptr<CGRecordLayout>
CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D)) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work
      // on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type.  Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively lay out D while laying D out as a base
  // type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // Ignore zero-sized fields.
    if (isEmptyFieldForLayout(getContext(), FD))
      continue;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert((Info.StorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
              Info.VolatileStorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity()
     << " VolatileOffset:" << VolatileOffset
     << " VolatileStorageSize:" << VolatileStorageSize
     << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}