//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace clang {
namespace CodeGen {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// NonVirtualBaseFieldTypes - Holds the LLVM types for the non-virtual part
  /// of the struct. For example, consider:
  ///
  /// struct A { int i; };
  /// struct B { void *v; };
  /// struct C : virtual A, B { };
  ///
  /// The LLVM type of C will be
  /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
  ///
  /// And the LLVM type of the non-virtual base struct will be
  /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
  std::vector<const llvm::Type *> NonVirtualBaseFieldTypes;

  /// NonVirtualBaseTypeIsSameAsCompleteType - Whether the non-virtual part of
  /// the struct is equivalent to the complete struct.
  bool NonVirtualBaseTypeIsSameAsCompleteType;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// IsZeroInitializable - Whether this struct can be C++
  /// zero-initialized with an LLVM zeroinitializer.
  bool IsZeroInitializable;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
  /// LLVM types.
  unsigned AlignmentAsLLVMStruct;

  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  /// this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
  void ComputeNonVirtualBaseType(const CXXRecordDecl *RD);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);

  /// getByteArrayType - Returns a byte array type with the given number of
  /// elements.
  const llvm::Type *getByteArrayType(uint64_t NumBytes);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// getCGRecordLayout - Return the CGRecordLayout for the given record.
  const CGRecordLayout &getCGRecordLayout(const CXXRecordDecl *RD);

  /// CheckZeroInitializable - Check if the given type contains a pointer
  /// to data member.
  void CheckZeroInitializable(QualType T);
  void CheckZeroInitializable(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : NonVirtualBaseTypeIsSameAsCompleteType(false), IsZeroInitializable(true),
      Packed(false), Types(Types), Alignment(0), AlignmentAsLLVMStruct(1),
      BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}
}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct. Try again with a packed struct
  Packed = true;
  AlignmentAsLLVMStruct = 1;
  NextFieldOffsetInBytes = 0;
  FieldTypes.clear();
  LLVMFields.clear();
  LLVMBitFields.clear();
  LLVMNonVirtualBases.clear();

  LayoutFields(D);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize,
                                        uint64_t ContainingTypeSizeInBits,
                                        unsigned ContainingTypeAlign) {
  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
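  //
  // An illustrative, hypothetical example: for a field `int x : 30;` placed
  // at bit offset 16 in a record at least 64 bits wide, the initial access
  // width is 32 and the first access starts at bit 0. That access covers the
  // field's bits in [16, 32); the loop then advances to bit 32, and a second
  // 32-bit access covers the remaining bits in [32, 46), giving two
  // components.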
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD,
                                        uint64_t FieldOffset,
                                        uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = RL.getSize();
  unsigned ContainingTypeAlign = RL.getAlignment();

  return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
                  ContainingTypeAlign);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, CGBitFieldInfo::MakeInfo(Types, D, FieldOffset,
                                                 FieldSize)));

  AppendBytes(NumBytesToAppend);

  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  CheckZeroInitializable(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}

const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    unsigned NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    if (NumBytesToAppend > 1)
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);

    // Add the bit field info.
    LLVMBitFields.push_back(
      LLVMBitFieldInfo(Field, CGBitFieldInfo::MakeInfo(Types, Field,
                                                       0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
  return Types.ConvertTypeForMemRecursive(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }
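
  // An illustrative, hypothetical example: the loop above keeps the member
  // with the strictest alignment, breaking ties by size, so for
  // `union U { char c; double d; };` the double member is chosen and becomes
  // the single field of U's LLVM type.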

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                                                 uint64_t BaseOffset) {
  // Ignore empty bases.
  if (BaseDecl->isEmpty())
    return;

  CheckZeroInitializable(BaseDecl);

  const ASTRecordLayout &Layout =
    Types.getContext().getASTRecordLayout(BaseDecl);

  uint64_t NonVirtualSize = Layout.getNonVirtualSize();

  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
  AppendPadding(BaseOffset / 8, 1);

  // Append the base field.
  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));

  AppendBytes(NonVirtualSize / 8);
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // FIXME: Handle a virtual primary base.
      if (!Layout.getPrimaryBaseWasVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
    }
  }
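
  // An illustrative, hypothetical example: a dynamic class with no primary
  // base, such as `struct D { virtual void f(); };`, takes the branch above
  // and gets the `i32 (...)**` vtable pointer as its first LLVM field, as in
  // the %struct.C example near the top of this file.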

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffsetInBits(BaseDecl));
  }
}

void
CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);

  uint64_t AlignedNonVirtualTypeSize =
    llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                             Layout.getNonVirtualAlign()) / 8;

  // First check if we can use the same fields as for the complete class.
  if (AlignedNonVirtualTypeSize == Layout.getSize() / 8) {
    NonVirtualBaseTypeIsSameAsCompleteType = true;
    return;
  }

  NonVirtualBaseFieldTypes = FieldTypes;

  // Check if we need padding.
  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  assert(AlignedNextFieldOffset <= AlignedNonVirtualTypeSize &&
         "Size mismatch!");

  if (AlignedNonVirtualTypeSize == AlignedNextFieldOffset) {
    // We don't need any padding.
    return;
  }

  uint64_t NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
  NonVirtualBaseFieldTypes.push_back(getByteArrayType(NumBytes));
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
  if (RD)
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  // We've laid out the non-virtual bases and the fields, now compute the
  // non-virtual base field types.
  if (RD)
    ComputeNonVirtualBaseType(RD);

  // FIXME: Lay out the virtual bases instead of just treating them as tail
  // padding.

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
                                        const llvm::Type *FieldTy) {
  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
                                   getTypeAlignment(FieldTy));

  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);

  FieldTypes.push_back(FieldTy);

  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                          unsigned FieldAlignment) {
  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }
}

const llvm::Type *CGRecordLayoutBuilder::getByteArrayType(uint64_t NumBytes) {
  assert(NumBytes != 0 && "Empty byte arrays aren't allowed.");

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  return Ty;
}

void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
  if (NumBytes == 0)
    return;

  // Append the padding field
  AppendField(NextFieldOffsetInBytes, getByteArrayType(NumBytes));
}

unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return 1;

  return Types.getTargetData().getABITypeAlignment(Ty);
}

const CGRecordLayout &
CGRecordLayoutBuilder::getCGRecordLayout(const CXXRecordDecl *RD) {
  // FIXME: It would be better if there was a way to explicitly compute the
  // record layout instead of converting to a type.
  Types.ConvertTagDeclType(RD);

  return Types.getCGRecordLayout(RD);
}

void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
  // This record has already been found not to be zero-initializable.
  if (!IsZeroInitializable)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  T = Types.getContext().getBaseElementType(T);

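  // An illustrative note (assuming the Itanium C++ ABI): a null pointer to
  // data member is represented as -1 rather than 0, so a field such as
  // `int S::*p;` makes an all-zero initializer incorrect for the record;
  // that is what the checks below look for.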
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
    if (!Types.getCXXABI().isZeroInitializable(MPT))
      IsZeroInitializable = false;
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    CheckZeroInitializable(RD);
  }
}

void CGRecordLayoutBuilder::CheckZeroInitializable(const CXXRecordDecl *RD) {
  // This record has already been found not to be zero-initializable.
  if (!IsZeroInitializable)
    return;

  const CGRecordLayout &Layout = getCGRecordLayout(RD);
  if (!Layout.isZeroInitializable())
    IsZeroInitializable = false;
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);

  const llvm::Type *BaseTy = 0;
  if (isa<CXXRecordDecl>(D)) {
    if (Builder.NonVirtualBaseTypeIsSameAsCompleteType)
      BaseTy = Ty;
    else if (!Builder.NonVirtualBaseFieldTypes.empty())
      BaseTy = llvm::StructType::get(getLLVMContext(),
                                     Builder.NonVirtualBaseFieldTypes,
                                     Builder.Packed);
  }

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);

  uint64_t TypeSizeInBits = Layout.getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    uint64_t AlignedNonVirtualTypeSizeInBits =
      llvm::RoundUpToAlignment(Layout.getNonVirtualSize(),
                               Layout.getNonVirtualAlign());

    assert(AlignedNonVirtualTypeSizeInBits ==
           getTargetData().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *LLVMType << "\n";
  if (NonVirtualBaseLLVMType)
    OS << " NonVirtualBaseLLVMType:" << *NonVirtualBaseLLVMType << "\n";
  OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}