//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder  ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type.  Some of the lowering is straightforward, some is not.  Here we
/// detail some of the complexities and weirdnesses.
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size.  We choose a field via a specific
///   heuristic and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run.  ASTRecordLayout
///   contains enough information to determine where the runs break.  Microsoft
///   and Itanium follow different rules and use different codepaths.
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types.  For example unsigned x : 24 gets lowered to
///   i24.  This isn't always possible because i24 has a storage size of 32
///   bits, and if it is possible to use that extra byte of padding we must use
///   [i8 x 3] instead of i24.  The function clipTailPadding does this.
///   C++ examples that require clipping:
///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///   struct A { int a : 24; }; // a must be clipped because a struct like B
///   could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
///   fields.  The existing asserts suggest that LLVM assumes that *every* field
///   has an underlying storage type.  Therefore empty structures containing
///   zero sized subobjects such as empty records or zero sized arrays still get
///   a zero sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields.  Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class.) However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout.  This introduces a somewhat awkward extra unnecessary clip stage.
///   The location of the clip is stored internally as a sentinel of type
///   SCISSOR.  If LLVM were updated to read base types (which it probably
///   should because locations of things such as VBases are bogus in the llvm
///   type anyway) then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases.  These bases don't
///   get their own storage because they're laid out as part of another base
///   or at the beginning of the structure.  Determining if a VBase actually
///   gets storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
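///
/// An illustrative sketch of the bitfield-run lowering (assuming an Itanium
/// target where int is 32 bits and 4-byte aligned; not a normative result):
///
///   struct S { unsigned a : 3; unsigned b : 5; char c; };
///
/// 'a' and 'b' form one run occupying bits 0-7 and share a single i8 storage
/// unit at offset 0, 'c' is an i8 at offset 1, and tail padding brings the
/// type up to the AST size, giving roughly { i8, i8, [2 x i8] }.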
struct CGRecordLowering {
  // MemberInfo is a helper structure that contains information about a record
  // member.  In addition to the standard member types, there exists a
  // sentinel member type that ensures correct rounding.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    union {
      const FieldDecl *FD;
      const CXXRecordDecl *RD;
    };
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const FieldDecl *FD = nullptr)
      : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
    MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
               const CXXRecordDecl *RD)
      : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
    // MemberInfos are sorted so we define a < operator.
    bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
  };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type.  We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }
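
  // An illustrative sketch of the rule above (assuming the MS ABI with
  // 16-bit shorts and 32-bit ints; the struct is hypothetical):
  //
  //   struct S { short a : 3; short b : 5; int c : 6; };
  //
  // 'a' and 'b' share one 16-bit storage unit because their formal types
  // match, but 'c' starts a new 32-bit unit because its formal type differs,
  // even though unused bits remain in the first unit.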

  /// Helper function to check if we are targeting AAPCS.
  bool isAAPCS() const {
    return Context.getTargetInfo().getABI().startswith("aapcs");
  }

  /// Helper function to check if the target machine is BigEndian.
  bool isBE() const { return Context.getTargetInfo().isBigEndian(); }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
  llvm::Type *getIntNType(uint64_t NumBits) {
    unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
    return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
  }
  /// Get the LLVM type sized as one character unit.
  llvm::Type *getCharType() {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 Context.getCharWidth());
  }
  /// Gets an llvm type of size NumChars and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumChars) {
    assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = getCharType();
    return NumChars == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                             (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm BaseSubobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion();
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();
  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace

CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);

  Info.VolatileStorageSize = 0;
  Info.VolatileOffset = 0;
  Info.VolatileStorageOffset = CharUnits::Zero();
}
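
// A worked instance of the big-endian reversal above (illustrative numbers):
// with StorageSize == 32, a bitfield with Offset == 0 and Size == 8 occupies
// the 8 least significant bits on a little-endian target; on a big-endian
// target the same memory byte holds the most significant bits, so the offset
// becomes 32 - (0 + 8) == 24.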

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base.  The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing.  It's important that this
  //    phase occur after clipping, because clipping changes the llvm type.
  //    This phase reads the offset of the capstone when determining packedness
  //    and updates the alignment of the capstone to be equal to the alignment
  //    of the record after doing so.
  // 5) Insert padding everywhere it is needed.  This phase requires 'Packed' to
  //    have been computed and needs to know the alignment of the record in
  //    order to understand if explicit tail padding is needed.
  // 6) Remove the capstone; we don't need it anymore.
  // 7) Determine if this record can be zero-initialized.  This phase could have
  //    been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion()) {
    lowerUnion();
    computeVolatileBitfields();
    return;
  }
  accumulateFields();
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty()) {
      appendPaddingBytes(Size);
      computeVolatileBitfields();
      return;
    }
    if (!NVBaseType)
      accumulateVBases();
  }
  llvm::stable_sort(Members);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}

void CGRecordLowering::lowerUnion() {
  CharUnits LayoutSize = Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.  The heuristic for finding the
  // storage type isn't strictly necessary: the first (non-0-length-bitfield)
  // field's type would work fine and be simpler, but it would differ from what
  // we've been doing and cause lit tests to change.
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not try to come up with a "better"
    // type; it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) >  getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
        getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  if (LayoutSize % getAlignment(StorageType))
    Packed = true;
}
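
// An illustrative sketch of the storage-type heuristic above (assuming a
// target with 8-byte, 8-byte-aligned doubles; the union is hypothetical):
//
//   union U { char c[9]; double d; };
//
// 'd' wins as the storage type because it has the higher alignment even
// though 'c' is larger, so the union lowers to roughly { double, [8 x i8] },
// with the trailing byte array restoring the 16-byte layout size.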

void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
    Field != FieldEnd;) {
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else if (!Field->isZeroSize(Context)) {
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          getStorageType(*Field), *Field));
      ++Field;
    } else {
      ++Field;
    }
  }
}

void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd is
  // used as a special value to note that we don't have a current run.  A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block.  Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.  It's
  // used to determine if the ASTRecordLayout is treating these two bitfields
  // as contiguous.  StartBitOffset is the offset of the beginning of the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type =
          Types.ConvertTypeForMem(Field->getType(), /*ForBitFields=*/true);
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record.  This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and remain
      // there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if OffsetInRecord (the size in bits of the current run) is better
  // as a single field run. When OffsetInRecord has a legal integer width and
  // its bitfield offset is naturally aligned, it is better to make the
  // bitfield a separate storage component so that it can be accessed directly
  // with lower cost.
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
        !DataLayout.fitsInLegalInteger(OffsetInRecord))
      return false;
    // Make sure StartBitOffset is naturally aligned if it is treated as an
    // IType integer.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
        0)
      return false;
    return true;
  };

  // Whether the start field of the current run is better as a single run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if the current field (or consecutive fields) is better as a single run,
    // or if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // skip the block below and go ahead and emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all of
    // the bitfields in the run.  Bitfields get the offset of their storage but
    // come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}
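
// An illustrative sketch of run formation on the Itanium path (hypothetical
// struct, assuming a typical target where useBitFieldTypeAlignment is true):
//
//   struct S { int a : 7; int b : 7; int : 0; int c : 7; };
//
// 'a' and 'b' accumulate into one run of 14 bits, which getIntNType rounds up
// to an i16 storage unit; the zero-width bitfield breaks the run, and 'c'
// starts a fresh run with its own i8 storage unit at the next int boundary
// chosen by the ASTRecordLayout.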

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
void CGRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
    return;

  for (auto &I : BitFields) {
    const FieldDecl *Field = I.first;
    CGBitFieldInfo &Info = I.second;
    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
    // If the record alignment is less than the type width, we can't enforce an
    // aligned load, bail out.
    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
        ResLTy->getPrimitiveSizeInBits())
      continue;
    // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // Info.StorageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned OldOffset =
        isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned AbsoluteOffset =
        Context.toBits(Info.StorageOffset) + OldOffset;

    // Container size is the width of the bit-field type.
    const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
    // Nothing to do if the access uses the desired
    // container width and is naturally aligned.
    if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
      continue;

    // Offset within the container.
    unsigned Offset = AbsoluteOffset & (StorageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases, so we
    // let clang follow its own rules.
    if (Offset + Info.Size > StorageSize)
      continue;

    // Re-adjust offsets for big-endian targets.
    if (isBE())
      Offset = StorageSize - (Offset + Info.Size);

    const CharUnits StorageOffset =
        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
    const CharUnits End = StorageOffset +
                          Context.toCharUnitsFromBits(StorageSize) -
                          CharUnits::One();

    const ASTRecordLayout &Layout =
        Context.getASTRecordLayout(Field->getParent());
    // If the access would reach memory outside the record, bail out.
    const CharUnits RecordSize = Layout.getSize();
    if (End >= RecordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool Conflict = false;
    for (const auto *F : D->fields()) {
      // Allow overlap with sized bit-fields.
      if (F->isBitField() && !F->isZeroLengthBitField(Context))
        continue;

      const CharUnits FOffset = Context.toCharUnitsFromBits(
          Layout.getFieldOffset(F->getFieldIndex()));

      // As C11 defines it, a zero-sized bit-field acts as a barrier, so
      // fields before and after it should be free of race conditions.
      // The AAPCS acknowledges this and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (F->isZeroLengthBitField(Context)) {
        if (End > FOffset && StorageOffset < FOffset) {
          Conflict = true;
          break;
        }
      }

      const CharUnits FEnd =
          FOffset +
          Context.toCharUnitsFromBits(
              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
          CharUnits::One();
      // If no overlap, continue.
      if (End < FOffset || FEnd < StorageOffset)
        continue;

      // The desired load overlaps a non-bit-field member, bail out.
      Conflict = true;
      break;
    }

    if (Conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset is now defined as the number of elements from the
    // start of the structure, we should divide the Offset by the element size.
    Info.VolatileStorageOffset =
        StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
    Info.VolatileStorageSize = StorageSize;
    Info.VolatileOffset = Offset;
  }
}
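
// An illustrative sketch of the AAPCS widening above (hypothetical structs,
// assuming 32-bit ints): in
//
//   struct S { volatile int a : 8; int b; };
//
// the volatile access to 'a' is widened to a 32-bit container (the width of
// its declared type), covering bytes 0-3 without touching 'b'; but in
//
//   struct T { volatile int a : 8; char c; };
//
// a 32-bit container would overlap the non-bit-field member 'c', so the
// narrower access already computed by setBitFieldInfo is kept.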

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
            getPointerTo()->getPointerTo()));
  if (Layout.hasOwnVBPtr())
    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}

void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the Itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize.  Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}

void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}
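
// A worked instance of the clipping above, matching the example in the file
// header comment: in
//
//   struct S { int a : 24; char b; };
//
// the run for 'a' is first given i24 storage, whose alloc size of 4 bytes
// would swallow 'b' at offset 3; because the next member starts before Tail,
// the i24 is replaced by [3 x i8], leaving byte 3 free for 'b'.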

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of the
  // record's alignment, it must be packed.
  if (Members.back().Offset % Alignment)
    Packed = true;
  // If the size of the non-virtual sub-object is not a multiple of the
  // non-virtual sub-object's alignment, it must be packed.  We cannot have a
  // packed non-virtual sub-object and an unpacked complete object or vice
  // versa.
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}
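
// An illustrative case that triggers packing (hypothetical struct, assuming
// 4-byte, 4-byte-aligned ints):
//
//   struct __attribute__((packed)) S { char c; int i; };
//
// 'i' lands at offset 1, which is not a multiple of i32's alignment, so the
// record is lowered as a packed LLVM struct, roughly <{ i8, i32 }>.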

void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
        Pad = Padding.begin(), PadEnd = Padding.end();
        Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
}

void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that
  // when addressed will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
    CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}

std::unique_ptr<CGRecordLayout>
CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work
      // on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type.  Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively lay out D while laying D out as a base type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize  = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
      getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // Ignore zero-sized fields.
    if (FD->isZeroSize(getContext()))
      continue;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert((Info.StorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
              Info.VolatileStorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity()
     << " VolatileOffset:" << VolatileOffset
     << " VolatileStorageSize:" << VolatileStorageSize
     << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}
   1055