//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

namespace {
/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
/// llvm::Type. Some of the lowering is straightforward, some is not. Here we
/// detail some of the complexities and weirdnesses:
/// * LLVM does not have unions - Unions can, in theory, be represented by any
///   llvm::Type with the correct size. We choose a field via a specific
///   heuristic and add padding if necessary.
/// * LLVM does not have bitfields - Bitfields are collected into contiguous
///   runs and allocated as a single storage type for the run. ASTRecordLayout
///   contains enough information to determine where the runs break. Microsoft
///   and Itanium follow different rules and use different codepaths.
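///   As an illustrative sketch (assuming Itanium rules):
///     struct S { unsigned a : 3; unsigned b : 6; char c; };
///   'a' and 'b' form a single 9-bit run whose storage is rounded up to an
///   i16, so S is lowered to roughly { i16, i8 }.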
/// * It is desired that, when possible, bitfields use the appropriate iN type
///   when lowered to llvm types. For example unsigned x : 24 gets lowered to
///   i24. This isn't always possible because i24 has a storage size of 32
///   bits, and if it is possible for that extra byte of padding to be used
///   then we must use [i8 x 3] instead of i24. The function clipTailPadding
///   does this. C++ examples that require clipping:
///   struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
///   struct A { int a : 24; };       // a must be clipped because a struct
///   like B could exist: struct B : A { char b; }; // b goes at offset 3
/// * Clang ignores zero-sized bitfields and zero-sized bases but *not*
///   zero-sized fields. The existing asserts suggest that LLVM assumes that
///   *every* field has an underlying storage type. Therefore empty structures
///   containing zero-sized subobjects such as empty records or zero-sized
///   arrays still get a zero-sized (empty struct) storage type.
/// * Clang reads the complete type rather than the base type when generating
///   code to access fields. Bitfields in tail position with tail padding may
///   be clipped in the base class but not the complete class (we may discover
///   that the tail padding is not used in the complete class.) However,
///   because LLVM reads from the complete type it can generate incorrect code
///   if we do not clip the tail padding off of the bitfield in the complete
///   layout. This introduces an extra, somewhat awkward clip stage. The
///   location of the clip is stored internally as a sentinel of type SCISSOR.
///   If LLVM were updated to read base types (which it probably should because
///   locations of things such as VBases are bogus in the llvm type anyway)
///   then we could eliminate the SCISSOR.
/// * Itanium allows nearly empty primary virtual bases. These bases don't get
///   their own storage because they're laid out as part of another base or at
///   the beginning of the structure. Determining if a VBase actually gets
///   storage awkwardly involves a walk of all bases.
/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
74struct CGRecordLowering {
75 // MemberInfo is a helper structure that contains information about a record
76 // member. In additional to the standard member types, there exists a
Alexander Kornienko2a8c18d2018-04-06 15:14:32 +000077 // sentinel member type that ensures correct rounding.
Warren Huntfb00c882014-02-21 23:49:50 +000078 struct MemberInfo {
79 CharUnits Offset;
80 enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
81 llvm::Type *Data;
82 union {
83 const FieldDecl *FD;
84 const CXXRecordDecl *RD;
85 };
86 MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
Craig Topper8a13c412014-05-21 05:09:00 +000087 const FieldDecl *FD = nullptr)
Warren Huntfb00c882014-02-21 23:49:50 +000088 : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
89 MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
90 const CXXRecordDecl *RD)
91 : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
92 // MemberInfos are sorted so we define a < operator.
93 bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
94 };
  // The constructor.
  CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
  // Short helper routines.
  /// \brief Constructs a MemberInfo instance from an offset and llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type. We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
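  /// For example (an illustrative sketch): under these rules
  ///   struct S { short a : 3; int b : 4; short c : 5; };
  /// keeps three discrete storage units (roughly i16, i32, i16) rather than
  /// packing a, b, and c into a single run.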
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }

  /// The Itanium base layout rule allows virtual bases to overlap
  /// other bases, which complicates layout in specific ways.
  ///
  /// Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !Context.getTargetInfo().getCXXABI().isMicrosoft();
  }

  /// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
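  /// For example, getIntNType(17) yields i24, since the requested bit count
  /// is rounded up to the next multiple of 8.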
  llvm::Type *getIntNType(uint64_t NumBits) {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 (unsigned)llvm::alignTo(NumBits, 8));
  }
  /// \brief Gets an llvm type of size NumBytes and alignment 1.
  llvm::Type *getByteArrayType(CharUnits NumBytes) {
    assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
    return NumBytes == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
  }
  /// \brief Gets the storage type for a field decl and handles storage
  /// for Itanium bitfields that are smaller than their declared type.
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                                (unsigned)Context.toBits(getSize(Type))));
  }
  /// \brief Gets the llvm base subobject type from a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  CharUnits bitsToCharUnits(uint64_t BitOffset) {
    return Context.toCharUnitsFromBits(BitOffset);
  }
  CharUnits getSize(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
  }
  CharUnits getAlignment(llvm::Type *Type) {
    return CharUnits::fromQuantity(DataLayout.getABITypeAlignment(Type));
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  uint64_t getFieldBitOffset(const FieldDecl *FD) {
    return Layout.getFieldOffset(FD->getFieldIndex());
  }
  // Layout routines.
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// \brief Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion();
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  /// \brief Recursively searches all of the bases to find out if a vbase is
  /// not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  /// \brief Lowers bitfield storage types to I8 arrays for bitfields with tail
  /// padding that is or can potentially be used.
  void clipTailPadding();
  /// \brief Determines if we need a packed llvm struct.
  void determinePacked(bool NVBaseType);
  /// \brief Inserts padding everywhere it's needed.
  void insertPadding();
  /// \brief Fills out the structures that are ultimately consumed.
  void fillOutputFields();
  // Input memoization fields.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Helpful intermediate data-structures.
  std::vector<MemberInfo> Members;
  // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace

CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}

void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
  Info.Size = FD->getBitWidthValue(Context);
  Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
  Info.StorageOffset = StartOffset;
  if (Info.Size > Info.StorageSize)
    Info.Size = Info.StorageSize;
  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
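  // For example, with StorageSize == 32: a field with Offset == 0 and
  // Size == 8 becomes Offset == 24 on a big-endian target (32 - (0 + 8)).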
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
}

void CGRecordLowering::lower(bool NVBaseType) {
  // The lowering process implemented in this function takes a variety of
  // carefully ordered phases.
  // 1) Store all members (fields and bases) in a list and sort them by offset.
  // 2) Add a 1-byte capstone member at the Size of the structure.
  // 3) Clip bitfield storage members if their tail padding is or might be
  //    used by another field or base. The clipping process uses the capstone
  //    by treating it as another object that occurs after the record.
  // 4) Determine if the llvm-struct requires packing. It's important that this
  //    phase occur after clipping, because clipping changes the llvm type.
  //    This phase reads the offset of the capstone when determining packedness
  //    and updates the alignment of the capstone to be equal to the alignment
  //    of the record after doing so.
  // 5) Insert padding everywhere it is needed. This phase requires 'Packed' to
  //    have been computed and needs to know the alignment of the record in
  //    order to understand if explicit tail padding is needed.
  // 6) Remove the capstone; we don't need it anymore.
  // 7) Determine if this record can be zero-initialized. This phase could have
  //    been placed anywhere after phase 1.
  // 8) Format the complete list of members in a way that can be consumed by
  //    CodeGenTypes::ComputeRecordLayout.
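  // Illustrative walk-through: for struct S { int a : 24; char b; }; phase 1
  // collects the 24-bit run and 'b', phase 2 appends a capstone at offset 4,
  // phase 3 clips the run's storage from i24 (alloc size 4) to [3 x i8]
  // because 'b' occupies its tail padding, and the remaining phases leave
  // roughly { [3 x i8], i8 }, unpacked.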
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion())
    return lowerUnion();
  accumulateFields();
  // RD implies C++.
  if (RD) {
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty())
      return appendPaddingBytes(Size);
    if (!NVBaseType)
      accumulateVBases();
  }
  std::stable_sort(Members.begin(), Members.end());
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
}

void CGRecordLowering::lowerUnion() {
  CharUnits LayoutSize = Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type. The heuristic for finding the
  // storage type isn't necessary; the first (non-0-length-bitfield) field's
  // type would work fine and be simpler, but it would differ from what we've
  // been doing and cause lit tests to change.
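  // Illustrative sketch of the heuristic: for union U { char c[5]; int i; };
  // the i32 of 'i' beats the [5 x i8] of 'c' on alignment, so the union is
  // lowered to roughly { i32, [4 x i8] } on a typical 32-bit-int target.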
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // Compute zero-initializable status.
    // This union might not be zero-initialized: it may contain a pointer to
    // data member, which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try to come up with a "better"
    // type; it might not be very easy to come up with a Constant which
    // correctly initializes it.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD =
                dyn_cast_or_null<RecordDecl>(Field->getType()->getAsTagDecl()))
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) > getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
         getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  if (LayoutSize % getAlignment(StorageType))
    Packed = true;
}

void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
       Field != FieldEnd;)
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else {
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          getStorageType(*Field), *Field));
      ++Field;
    }
}

void
CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                      RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields. FieldEnd is
  // used as a special value to note that we don't have a current run. A
  // bitfield run is a contiguous collection of bitfields that can be stored in
  // the same storage block. Zero-sized bitfields and bitfields that would
  // cross an alignment boundary break a run and start a new one.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run. It's
  // used to determine if the ASTRecordLayout is treating these two bitfields
  // as contiguous. StartBitOffset is the offset of the beginning of the Run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type = Types.ConvertTypeForMem(Field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // Add the storage member to the record. This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and
      // remain there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }

  // Check if the current Field is better off as a single field run. When the
  // current field has a legal integer width and its bitfield offset is
  // naturally aligned, it is better to make the bitfield a separate storage
  // component so that it can be accessed directly with lower cost.
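  // Illustrative sketch (assuming a target where i16 is a legal integer
  // type): under -ffine-grained-bitfield-accesses,
  //   struct S { int a : 16; int b : 16; };
  // gives 'a' and 'b' their own naturally aligned i16 storage units rather
  // than one shared i32 run, so each can be loaded and stored independently.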
  auto IsBetterAsSingleFieldRun = [&](RecordDecl::field_iterator Field) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    unsigned Width = Field->getBitWidthValue(Context);
    if (!DataLayout.isLegalInteger(Width))
      return false;
    // Make sure Field is naturally aligned if it is treated as an IType
    // integer.
    if (getFieldBitOffset(*Field) %
            Context.toBits(getAlignment(getIntNType(Width))) !=
        0)
      return false;
    return true;
  };

  // Tracks whether the start field of the current run is better off as a
  // single field run.
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, return.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Run);
      }
      ++Field;
      continue;
    }

    // If the start field of a new run is better as a single run, or
    // if the current field is better as a single run, or
    // if the current field is a zero-width bitfield and either
    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
    // true, or
    // if the offset of the current field is inconsistent with the offset of
    // the previous field plus its width,
    // then skip the block below and go ahead and emit the storage.
    // Otherwise, try to add bitfields to the run.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Field) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }

    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all
    // of the bitfields in the run. Bitfields get the offset of their storage
    // but come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}

void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;

    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}

void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
            getPointerTo()->getPointerTo()));
  if (Layout.hasOwnVBPtr())
    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}

void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the Itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize. Here we check to see if such a base is placed
  // before the nvsize and set the scissor offset to that, instead of the
  // nvsize.
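  // Illustrative sketch: when the non-virtual part ends with unused tail
  // padding (dsize < nvsize), Itanium may start a virtual base inside that
  // padding; the scissor must then be pulled back to that vbase's offset so
  // the vbase stays out of the base-subobject type.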
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
      Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
                                    getIntNType(32)));
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}

bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}

void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}

void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field && !Prior->FD &&
             "Only storage fields have tail padding!");
      Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
          cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}

void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of the
  // record's alignment, it must be packed.
  if (Members.back().Offset % Alignment)
    Packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed. We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}

void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
           Pad = Padding.begin(), PadEnd = Padding.end();
       Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  std::stable_sort(Members.begin(), Members.end());
}

void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t Offset, uint64_t Size,
                                        uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  // This function is vestigial from CGRecordLayoutBuilder days but is still
  // used in GCObjCRuntime.cpp. That usage has a FIXME attached to it which,
  // when addressed, will allow for the removal of this function.
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
      CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);

  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();

  if (Size > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    Size = TypeSizeInBits;
  }

  // Reverse the bit offsets for big endian machines. Because we represent
  // a bitfield as a single large integer load, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }

  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
                                                  llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      addRecordTypeName(D, BaseTy, ".base");
      // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to
      // work on both of them with the same index.
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type. Filling in the body
  // signifies that the type is no longer opaque and record layout is complete,
  // but we may need to recursively lay out D while laying it out as a base
  // type.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);

  CGRecordLayout *RL =
      new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
                         Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);

  // Dump the layout, if requested.
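  // (Usage note, as an aside: this dump is normally reached via the cc1
  // option -fdump-record-layouts.)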
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();

    uint64_t AlignedNonVirtualTypeSizeInBits =
        getContext().toBits(NonVirtualSize);

    assert(AlignedNonVirtualTypeSizeInBits ==
           getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    // Don't inspect zero-length bitfields.
    if (FD->isZeroLengthBitField(getContext()))
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // For unions we verify that the start is zero and the size
      // is in-bounds. However, on BE systems, the offset may be non-zero, but
      // the size + offset should match the storage size in that case as it
      // "starts" at the back.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
               Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert(Info.StorageSize ==
             getDataLayout().getTypeAllocSizeInBits(ElementTy) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}

void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset
     << " Size:" << Size
     << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity() << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}