//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace clang {
namespace CodeGen {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// NonVirtualBaseFieldTypes - Holds the LLVM types for the non-virtual part
  /// of the struct. For example, consider:
  ///
  ///   struct A { int i; };
  ///   struct B { void *v; };
  ///   struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  std::vector<const llvm::Type *> NonVirtualBaseFieldTypes;

  /// NonVirtualBaseTypeIsSameAsCompleteType - Whether the non-virtual part of
  /// the struct is equivalent to the complete struct.
  bool NonVirtualBaseTypeIsSameAsCompleteType;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
  /// LLVM types.
  unsigned AlignmentAsLLVMStruct;

  /// BitsAvailableInLastField - If a bit field spans only part of an LLVM
  /// field, this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Lay out a field in a union and return the type that
  /// the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Lay out a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - Try to lay out all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutNonVirtualBase - Lay out a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - Lay out the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  void ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - Lay out a single field. Returns false if the operation
  /// failed because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - Lay out a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  const llvm::Type *getByteArrayType(uint64_t NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// getCGRecordLayout - Return the CGRecordLayout for the given record.
  const CGRecordLayout &getCGRecordLayout(const CXXRecordDecl *RD);

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);
  void CheckZeroInitializable(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : NonVirtualBaseTypeIsSameAsCompleteType(false), IsZeroInitializable(true),
      Packed(false), Types(Types), Alignment(0), AlignmentAsLLVMStruct(1),
      BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }

157 /// Layout - Will layout a RecordDecl.
158 void Layout(const RecordDecl *D);
159};
160
}
}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to lay out the struct. Try again with a packed struct.
  Packed = true;
  AlignmentAsLLVMStruct = 1;
  NextFieldOffsetInBytes = 0;
  FieldTypes.clear();
  LLVMFields.clear();
  LLVMBitFields.clear();
  LLVMNonVirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
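  //
  // For example, a 10-bit field whose declared type is a 32-bit int, placed
  // at bit offset 28 of a 64-bit record, is split into two i32 accesses:
  // bits [28, 32) of the first word and bits [0, 6) of the second.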
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = RL.getSize();
  unsigned ContainingTypeAlign = RL.getAlignment();

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field.
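    // For example, after 'int a : 3;' one byte has been appended and five
    // bits remain free; a following 'int b : 7;' starts at bit offset 3, so
    // it only needs RoundUpToAlignment(7 - 5, 8) / 8 == 1 additional byte.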
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
                                                 FieldSize)));

  AppendBytes(NumBytesToAppend);

  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}

const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    unsigned NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    if (NumBytesToAppend > 1)
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);

    // Add the bit field info.
    LLVMBitFields.push_back(
      LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
                                                       0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
  return Types.ConvertTypeForMemRecursive(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

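  // Pick the member with the strictest alignment as the type we lay out; ties
  // between equally aligned members go to the larger one. Any space the chosen
  // member doesn't cover is filled with tail padding below.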
  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                                                 uint64_t BaseOffset) {
  const ASTRecordLayout &Layout =
    Types.getContext().getASTRecordLayout(BaseDecl);

  uint64_t NonVirtualSize = Layout.getNonVirtualSize();

  if (BaseDecl->isEmpty()) {
    // FIXME: Lay out empty bases.
    return;
  }

  CheckZeroInitializable(BaseDecl);

  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
  AppendPadding(BaseOffset / 8, 1);

  // Append the base field.
  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));

  AppendBytes(NonVirtualSize / 8);
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();
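      // The vptr points at a table of function pointers, so the field
      // appended below ends up with type 'i32 (...)**'.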

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // FIXME: Handle a virtual primary base.
      if (!Layout.getPrimaryBaseWasVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
    }
  }

  // Lay out the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffsetInBits(BaseDecl));
  }
}

void
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  uint64_t AlignedNonVirtualTypeSize =
    llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                             Layout.getNonVirtualAlign()) / 8;

  // First check if we can use the same fields as for the complete class.
  if (AlignedNonVirtualTypeSize == Layout.getSize() / 8) {
    NonVirtualBaseTypeIsSameAsCompleteType = true;
    return;
  }

  NonVirtualBaseFieldTypes = FieldTypes;

  // Check if we need padding.
  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  assert(AlignedNextFieldOffset <= AlignedNonVirtualTypeSize &&
         "Size mismatch!");

  if (AlignedNonVirtualTypeSize == AlignedNextFieldOffset) {
    // We don't need any padding.
    return;
  }

  uint64_t NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
  NonVirtualBaseFieldTypes.push_back(getByteArrayType(NumBytes));
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  // We've laid out the non-virtual bases and the fields, now compute the
  // non-virtual base field types.
  if (RD)
    ComputeNonVirtualBaseType(RD);

  // FIXME: Lay out the virtual bases instead of just treating them as tail
  // padding.

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
                                        const llvm::Type *FieldTy) {
  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
                                   getTypeAlignment(FieldTy));

  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);

  FieldTypes.push_back(FieldTy);

  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                          unsigned FieldAlignment) {
  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }
}

const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(uint64_t NumBytes) {
  assert(NumBytes != 0 && "Empty byte arrays aren't allowed.");

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
  if (NumBytes == 0)
    return;

  // Append the padding field
  AppendField(NextFieldOffsetInBytes, getByteArrayType(NumBytes));
}

unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return 1;

  return Types.getTargetData().getABITypeAlignment(Ty);
}

const CGRecordLayout &
CGRecordLayoutBuilder::getCGRecordLayout(const CXXRecordDecl *RD) {
  // FIXME: It would be better if there was a way to explicitly compute the
  // record layout instead of converting to a type.
  Types.ConvertTagDeclType(RD);

  return Types.getCGRecordLayout(RD);
}

void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // The record is already known not to be zero-initializable; nothing to check.
  if (!IsZeroInitializable)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  T = Types.getContext().getBaseElementType(T);

  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = false;
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    CheckZeroInitializable(RD);
  }
}

void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
  // The record is already known not to be zero-initializable; nothing to check.
  if (!IsZeroInitializable)
    return;

  const CGRecordLayout &Layout = getCGRecordLayout(RD);
  if (!Layout.isZeroInitializable())
    IsZeroInitializable = false;
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);

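  // For a C++ record, also compute the type used when the class appears as a
  // base-class subobject (the non-virtual "base" type, e.g. %struct.C.base in
  // the builder's comment above).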
  const llvm::Type *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    if (Builder.NonVirtualBaseTypeIsSameAsCompleteType)
      BaseTy = Ty;
    else if (!Builder.NonVirtualBaseFieldTypes.empty())
      BaseTy = llvm::StructType::get(getLLVMContext(),
                                     Builder.NonVirtualBaseFieldTypes,
                                     Builder.Packed);
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = Layout.getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    uint64_t AlignedNonVirtualTypeSizeInBits =
      llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                               Layout.getNonVirtualAlign());

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *LLVMType << "\n";
  if (NonVirtualBaseLLVMType)
    OS << " NonVirtualBaseLLVMType:" << *NonVirtualBaseLLVMType << "\n";
  OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}