//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// NonVirtualBaseFieldTypes - Holds the LLVM types for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  std::vector<const llvm::Type *> NonVirtualBaseFieldTypes;

  /// NonVirtualBaseTypeIsSameAsCompleteType - Whether the non-virtual part of
  /// the struct is equivalent to the complete struct.
  bool NonVirtualBaseTypeIsSameAsCompleteType;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
  /// primary base classes for some other direct or indirect base class.
  CXXIndirectPrimaryBaseSet IndirectPrimaryBases;

  /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
  /// laying out virtual bases more than once.
  llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
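  ///
  /// For illustration (a rough sketch): given
  ///
  ///   struct S { int a : 3; int b : 3; };
  ///
  /// laying out 'a' appends a single byte and leaves 5 bits available, so 'b'
  /// (at bit offset 3) is packed into that same byte and no new bytes are
  /// appended.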
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
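  ///
  /// For illustration (a rough sketch; exact IR type names and padding may
  /// differ): given
  ///
  ///   union U { int i; char buf[8]; };
  ///
  /// the int member has the strictest alignment, so its type becomes the
  /// union's single field and the remaining space is tail padding, giving
  /// roughly
  ///
  ///   %union.U = type { i32, [4 x i8] }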
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutBase - layout a single base, virtual or non-virtual.
  void LayoutBase(const CXXRecordDecl *BaseDecl, uint64_t BaseOffset);

  /// LayoutVirtualBase - layout a single virtual base.
  void LayoutVirtualBase(const CXXRecordDecl *BaseDecl, uint64_t BaseOffset);

  /// LayoutVirtualBases - layout the virtual bases of a record decl.
  void LayoutVirtualBases(const CXXRecordDecl *RD,
                          const ASTRecordLayout &Layout);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes,
                     unsigned FieldAlignmentInBytes);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  const llvm::Type *getByteArrayType(uint64_t NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
  /// LLVM element types.
  unsigned getAlignmentAsLLVMStruct() const;

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to a data member.
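  ///
  /// For example (illustrative): a record with a field of type 'int S::*'
  /// cannot be zero-initialized with an LLVM zeroinitializer under the
  /// Itanium C++ ABI, where the null data member pointer is represented as -1
  /// rather than 0.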
  void CheckZeroInitializable(QualType T);
  void CheckZeroInitializable(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : NonVirtualBaseTypeIsSameAsCompleteType(false), IsZeroInitializable(true),
      Packed(false), Types(Types), Alignment(0), BitsAvailableInLastField(0),
      NextFieldOffsetInBytes(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct. Try again with a packed struct.
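  // This can happen when, for example, an individual field is marked
  // __attribute__((packed)) or otherwise requires a placement that an
  // unpacked LLVM struct cannot honor (see LayoutField below).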
  Packed = true;
  NextFieldOffsetInBytes = 0;
  FieldTypes.clear();
  LLVMFields.clear();
  LLVMBitFields.clear();
  LLVMNonVirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
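  //
  // As a rough illustration (assuming a 32-bit int; not generated output):
  // given a packed record such as
  //
  //   struct __attribute__((packed)) S { char a[3]; int b : 16; };
  //
  // the bit-field 'b' starts at bit 24 of a 40-bit record and has a 32-bit
  // access type. The first component is a 32-bit access at bit 0 covering
  // target bits [24, 32). A second 32-bit access at bit 32 would run past the
  // end of the record, so the width is halved until it fits, and the remaining
  // target bits [32, 40) are covered by an 8-bit access at byte offset 4.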
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = RL.getSize();
  unsigned ContainingTypeAlign = RL.getAlignment();

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendPadding(FieldOffset / 8, 1);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
                                                 FieldSize)));

  AppendBytes(NumBytesToAppend);

  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  AppendPadding(FieldOffsetInBytes, TypeAlignment);

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}

const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    unsigned NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    if (NumBytesToAppend > 1)
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);

    // Add the bit field info.
    LLVMBitFields.push_back(
      LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
                                                       0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
  return Types.ConvertTypeForMemRecursive(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}

void CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *BaseDecl,
                                       uint64_t BaseOffset) {
  CheckZeroInitializable(BaseDecl);

  const ASTRecordLayout &Layout =
    Types.getContext().getASTRecordLayout(BaseDecl);

  uint64_t NonVirtualSize = Layout.getNonVirtualSize();

  AppendPadding(BaseOffset / 8, 1);

  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
  AppendBytes(NonVirtualSize / 8);
}

void
CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *BaseDecl,
                                         uint64_t BaseOffset) {
  // Ignore empty bases.
  if (BaseDecl->isEmpty())
    return;

  CheckZeroInitializable(BaseDecl);

  const ASTRecordLayout &Layout =
    Types.getContext().getASTRecordLayout(BaseDecl);

  uint64_t NonVirtualSize = Layout.getNonVirtualSize();

  AppendPadding(BaseOffset / 8, 1);

  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
  AppendBytes(NonVirtualSize / 8);

  // FIXME: Add the vbase field info.
}

/// LayoutVirtualBases - layout the virtual bases of a record decl.
void
CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
                                          const ASTRecordLayout &Layout) {
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We only want to lay out virtual bases that aren't indirect primary bases
    // of some other base.
    if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
      // Only lay out the base once.
      if (!LaidOutVirtualBases.insert(BaseDecl))
        continue;

      uint64_t VBaseOffset = Layout.getVBaseClassOffsetInBits(BaseDecl);
      LayoutVirtualBase(BaseDecl, VBaseOffset);
    }

    if (!BaseDecl->getNumVBases()) {
      // This base isn't interesting since it doesn't have any virtual bases.
      continue;
    }

    LayoutVirtualBases(BaseDecl, Layout);
  }
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                                                 uint64_t BaseOffset) {
  // Ignore empty bases.
  if (BaseDecl->isEmpty())
    return;

  LayoutBase(BaseDecl, BaseOffset);

  // Append the base field.
  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size() - 1));
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
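  // For example (illustrative; exact IR type names may differ): a dynamic
  // class with no primary base, such as
  //
  //   struct A { virtual void f(); int x; };
  //
  // gets an 'i32 (...)**' vtable pointer as its first field, giving roughly
  //
  //   %struct.A = type { i32 (...)**, i32 }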
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      if (!Layout.isPrimaryBaseVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
      else
        LayoutVirtualBase(PrimaryBase, 0);
    }
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffsetInBits(BaseDecl));
  }
}

bool
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  uint64_t AlignedNonVirtualTypeSize =
    llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                             Layout.getNonVirtualAlign()) / 8;

  // First check if we can use the same fields as for the complete class.
  if (AlignedNonVirtualTypeSize == Layout.getSize() / 8) {
    NonVirtualBaseTypeIsSameAsCompleteType = true;
    return true;
  }

  // Check if we need padding.
  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
                             getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize)
    return false; // Needs packing.

  NonVirtualBaseFieldTypes = FieldTypes;

  if (AlignedNonVirtualTypeSize == AlignedNextFieldOffset) {
    // We don't need any padding.
    return true;
  }

  uint64_t NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
  NonVirtualBaseFieldTypes.push_back(getByteArrayType(NumBytes));
  return true;
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  if (RD) {
    // We've laid out the non-virtual bases and the fields, now compute the
    // non-virtual base field types.
    if (!ComputeNonVirtualBaseType(RD)) {
      assert(!Packed && "Could not layout even with a packed LLVM struct!");
      return false;
    }

    // And lay out the virtual bases.
    RD->getIndirectPrimaryBases(IndirectPrimaryBases);
    if (Layout.isPrimaryBaseVirtual())
      IndirectPrimaryBases.insert(Layout.getPrimaryBase());
    LayoutVirtualBases(RD, Layout);
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
                             getAlignmentAsLLVMStruct());

  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
                                        const llvm::Type *FieldTy) {
  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);

  FieldTypes.push_back(FieldTy);

  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                          unsigned FieldAlignmentInBytes) {
  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignmentInBytes);

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }
}

const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(uint64_t NumBytes) {
  assert(NumBytes != 0 && "Empty byte arrays aren't allowed.");

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
  if (NumBytes == 0)
    return;

  // Append the padding field
  AppendField(NextFieldOffsetInBytes, getByteArrayType(NumBytes));
}

unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return 1;

  return Types.getTargetData().getABITypeAlignment(Ty);
}

unsigned CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
  if (Packed)
    return 1;

  unsigned MaxAlignment = 1;
  for (size_t i = 0; i != FieldTypes.size(); ++i)
    MaxAlignment = std::max(MaxAlignment, getTypeAlignment(FieldTypes[i]));

  return MaxAlignment;
}

void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record already contains a member pointer.
  if (!IsZeroInitializable)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  T = Types.getContext().getBaseElementType(T);

  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = false;
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    CheckZeroInitializable(RD);
  }
}

void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
  // This record already contains a member pointer.
  if (!IsZeroInitializable)
    return;

  const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
  if (!Layout.isZeroInitializable())
    IsZeroInitializable = false;
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::StructType *Ty = llvm::StructType::get(getLLVMContext(),
                                                     Builder.FieldTypes,
                                                     Builder.Packed);

  const llvm::StructType *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    if (Builder.NonVirtualBaseTypeIsSameAsCompleteType)
      BaseTy = Ty;
    else if (!Builder.NonVirtualBaseFieldTypes.empty())
      BaseTy = llvm::StructType::get(getLLVMContext(),
                                     Builder.NonVirtualBaseFieldTypes,
                                     Builder.Packed);
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = Layout.getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    uint64_t AlignedNonVirtualTypeSizeInBits =
      llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                               Layout.getNonVirtualAlign());

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *LLVMType << "\n";
  if (NonVirtualBaseLLVMType)
    OS << " NonVirtualBaseLLVMType:" << *NonVirtualBaseLLVMType << "\n";
  OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}