blob: 9f1687577c3ec2fcbff8981a7d44ca1806d705e2 [file] [log] [blame]
Daniel Dunbar270e2032010-03-31 00:11:27 +00001//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
Anders Carlsson45372a62009-07-23 03:17:50 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
Daniel Dunbar270e2032010-03-31 00:11:27 +000010// Builder implementation for CGRecordLayout objects.
Anders Carlsson45372a62009-07-23 03:17:50 +000011//
12//===----------------------------------------------------------------------===//
13
Daniel Dunbar2924ade2010-03-30 22:26:10 +000014#include "CGRecordLayout.h"
Anders Carlsson45372a62009-07-23 03:17:50 +000015#include "clang/AST/ASTContext.h"
16#include "clang/AST/Attr.h"
17#include "clang/AST/DeclCXX.h"
18#include "clang/AST/Expr.h"
19#include "clang/AST/RecordLayout.h"
20#include "CodeGenTypes.h"
21#include "llvm/DerivedTypes.h"
Daniel Dunbar93c62962010-04-12 18:14:18 +000022#include "llvm/Type.h"
Daniel Dunbar3b2ae7a2010-04-21 19:10:49 +000023#include "llvm/Support/Debug.h"
Daniel Dunbar93c62962010-04-12 18:14:18 +000024#include "llvm/Support/raw_ostream.h"
Anders Carlsson45372a62009-07-23 03:17:50 +000025#include "llvm/Target/TargetData.h"
Anders Carlsson45372a62009-07-23 03:17:50 +000026using namespace clang;
27using namespace CodeGen;
28
namespace clang {
namespace CodeGen {

/// CGRecordLayoutBuilder - A helper class that builds the LLVM struct type
/// and the field / bit-field / base mapping information for a single
/// RecordDecl. The results are consumed to construct a CGRecordLayout.
class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

  /// LLVMBaseInfo - Holds a non-virtual base class and its LLVM field number.
  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// ContainsPointerToDataMember - Whether one of the fields in this record
  /// layout is a pointer to data member, or a struct that contains pointer to
  /// data member.
  bool ContainsPointerToDataMember;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
  /// LLVM types.
  unsigned AlignmentAsLLVMStruct;

  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  /// this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Will layout a field in an union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  /// getTypeAlignment - Return the alignment used for Ty (one byte when the
  /// layout is packed).
  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// CheckForPointerToDataMember - Check if the given type contains a pointer
  /// to data member.
  void CheckForPointerToDataMember(QualType T);
  void CheckForPointerToDataMember(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : ContainsPointerToDataMember(false), Packed(false), Types(Types),
      Alignment(0), AlignmentAsLLVMStruct(1),
      BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}
}
135
Anders Carlsson45372a62009-07-23 03:17:50 +0000136void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
Anders Carlssona5dd7222009-08-08 19:38:24 +0000137 Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
Anders Carlssond0eb3b92009-09-02 17:51:33 +0000138 Packed = D->hasAttr<PackedAttr>();
Anders Carlssona5dd7222009-08-08 19:38:24 +0000139
Anders Carlsson5a6e3982009-07-23 03:43:54 +0000140 if (D->isUnion()) {
141 LayoutUnion(D);
142 return;
143 }
Anders Carlssona860e752009-08-08 18:23:56 +0000144
Anders Carlsson45372a62009-07-23 03:17:50 +0000145 if (LayoutFields(D))
146 return;
Mike Stump1eb44332009-09-09 15:08:12 +0000147
Anders Carlsson45372a62009-07-23 03:17:50 +0000148 // We weren't able to layout the struct. Try again with a packed struct
Anders Carlsson4b5584b2009-07-23 17:24:40 +0000149 Packed = true;
Anders Carlsson45372a62009-07-23 03:17:50 +0000150 AlignmentAsLLVMStruct = 1;
Anders Carlssonc2cc1d52009-07-28 17:56:36 +0000151 NextFieldOffsetInBytes = 0;
Anders Carlsson45372a62009-07-23 03:17:50 +0000152 FieldTypes.clear();
Anders Carlsson45372a62009-07-23 03:17:50 +0000153 LLVMFields.clear();
154 LLVMBitFields.clear();
Anders Carlssonc6772ce2010-05-18 05:22:06 +0000155 LLVMNonVirtualBases.clear();
Mike Stump1eb44332009-09-09 15:08:12 +0000156
Anders Carlsson45372a62009-07-23 03:17:50 +0000157 LayoutFields(D);
158}
159
/// ComputeBitFieldInfo - Compute the access information (CGBitFieldInfo) for
/// the bit-field FD, which occupies FieldSize bits starting at bit offset
/// FieldOffset within its containing record.
static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
                                          const FieldDecl *FD,
                                          uint64_t FieldOffset,
                                          uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = RL.getSize();
  unsigned ContainingTypeAlign = RL.getAlignment();

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    // T t : N;
    //
    // We can just assume that it's:
    //
    // T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0; // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}
257
/// LayoutBitField - Lay out the bit-field D at bit offset FieldOffset,
/// appending whole bytes of storage as needed and recording the bits still
/// available in the last byte for a following bit-field to share.
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  // Zero-width bit-fields contribute no storage.
  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field; only the bits that spill
    // past the previously appended bytes need new storage.
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, ComputeBitFieldInfo(Types, D, FieldOffset, FieldSize)));

  AppendBytes(NumBytesToAppend);

  // Remember how many bits of the last appended byte remain unused so the next
  // bit-field can pack into them.
  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}
297
/// LayoutField - Lay out the single field D at bit offset FieldOffset.
/// Returns false when the field cannot be placed with the current (unpacked)
/// settings, signalling the caller to retry with a packed struct.
bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  // Check if we have a pointer to data member in this field.
  CheckForPointerToDataMember(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger then the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  // A record field whose max-field-alignment attribute disagrees with the
  // LLVM type's alignment also forces a packed layout.
  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const MaxFieldAlignmentAttr *MFAA =
          RD->getAttr<MaxFieldAlignmentAttr>()) {
      if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  // If natural alignment would place the field past its AST offset, only a
  // packed layout can match the AST layout.
  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}
362
Anders Carlsson86664462010-04-17 20:49:27 +0000363const llvm::Type *
364CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
365 const ASTRecordLayout &Layout) {
366 if (Field->isBitField()) {
367 uint64_t FieldSize =
368 Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
369
370 // Ignore zero sized bit fields.
371 if (FieldSize == 0)
372 return 0;
373
Daniel Dunbar8ab78a72010-04-20 17:52:30 +0000374 const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
375 unsigned NumBytesToAppend =
376 llvm::RoundUpToAlignment(FieldSize, 8) / 8;
Anders Carlssond62328e2010-04-17 21:04:52 +0000377
Daniel Dunbar8ab78a72010-04-20 17:52:30 +0000378 if (NumBytesToAppend > 1)
379 FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);
Anders Carlssond62328e2010-04-17 21:04:52 +0000380
Anders Carlsson86664462010-04-17 20:49:27 +0000381 // Add the bit field info.
382 LLVMBitFields.push_back(
383 LLVMBitFieldInfo(Field, ComputeBitFieldInfo(Types, Field, 0, FieldSize)));
Anders Carlssond62328e2010-04-17 21:04:52 +0000384 return FieldTy;
Anders Carlsson86664462010-04-17 20:49:27 +0000385 }
Daniel Dunbar8ab78a72010-04-20 17:52:30 +0000386
Anders Carlsson86664462010-04-17 20:49:27 +0000387 // This is a regular union field.
388 LLVMFields.push_back(LLVMFieldInfo(Field, 0));
389 return Types.ConvertTypeForMemRecursive(Field->getType());
390}
391
/// LayoutUnion - Lay out a union by choosing the member with the strictest
/// alignment (largest size breaking ties) as the single stored field, then
/// padding out to the union's full size.
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  // The winning member's type, size and alignment.
  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    // Zero-sized bit-fields produce no type and are skipped.
    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    // A less-aligned member can never win.
    if (FieldAlign < Align)
      continue;

    // Prefer stricter alignment; among equally aligned members, larger size.
    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}
448
Anders Carlsson15ddfdc2010-05-18 05:12:20 +0000449void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
450 uint64_t BaseOffset) {
451 const ASTRecordLayout &Layout =
452 Types.getContext().getASTRecordLayout(BaseDecl);
Daniel Dunbar270e2032010-03-31 00:11:27 +0000453
Anders Carlsson15ddfdc2010-05-18 05:12:20 +0000454 uint64_t NonVirtualSize = Layout.getNonVirtualSize();
455
456 if (BaseDecl->isEmpty()) {
457 // FIXME: Lay out empty bases.
458 return;
459 }
460
Anders Carlssona83fb4b2010-05-18 16:51:41 +0000461 CheckForPointerToDataMember(BaseDecl);
462
Anders Carlsson15ddfdc2010-05-18 05:12:20 +0000463 // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
464 AppendPadding(BaseOffset / 8, 1);
Anders Carlssonc6772ce2010-05-18 05:22:06 +0000465
466 // Append the base field.
467 LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));
468
Anders Carlsson15ddfdc2010-05-18 05:12:20 +0000469 AppendBytes(NonVirtualSize / 8);
470}
471
/// LayoutNonVirtualBases - Lay out the vtable pointer (if any) and all
/// non-virtual bases of RD, the primary base first.
void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      // No primary base to share a vtable pointer with; add our own as the
      // first field, typed as a pointer to a vararg function pointer
      // (i.e. 'i32 (...)**').
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // The vtable pointer comes from the primary base, so lay it out first.
      // FIXME: Handle a virtual primary base.
      if (!Layout.getPrimaryBaseWasVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
    }
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }
}
511
Anders Carlsson45372a62009-07-23 03:17:50 +0000512bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
513 assert(!D->isUnion() && "Can't call LayoutFields on a union!");
Anders Carlssona5dd7222009-08-08 19:38:24 +0000514 assert(Alignment && "Did not set alignment!");
Mike Stump1eb44332009-09-09 15:08:12 +0000515
Anders Carlsson5a6e3982009-07-23 03:43:54 +0000516 const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
Mike Stump1eb44332009-09-09 15:08:12 +0000517
Anders Carlsson4b3e5be2009-12-16 17:27:20 +0000518 if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
Anders Carlsson15ddfdc2010-05-18 05:12:20 +0000519 LayoutNonVirtualBases(RD, Layout);
Daniel Dunbar270e2032010-03-31 00:11:27 +0000520
Anders Carlsson45372a62009-07-23 03:17:50 +0000521 unsigned FieldNo = 0;
Fariborz Jahaniancad86652009-07-27 20:57:45 +0000522
Mike Stump1eb44332009-09-09 15:08:12 +0000523 for (RecordDecl::field_iterator Field = D->field_begin(),
Anders Carlsson45372a62009-07-23 03:17:50 +0000524 FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
525 if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
Mike Stump1eb44332009-09-09 15:08:12 +0000526 assert(!Packed &&
Anders Carlsson45372a62009-07-23 03:17:50 +0000527 "Could not layout fields even with a packed LLVM struct!");
528 return false;
529 }
530 }
531
532 // Append tail padding if necessary.
Anders Carlssonc1efe362009-07-27 14:55:54 +0000533 AppendTailPadding(Layout.getSize());
Mike Stump1eb44332009-09-09 15:08:12 +0000534
Anders Carlsson45372a62009-07-23 03:17:50 +0000535 return true;
536}
537
Anders Carlssonc1efe362009-07-27 14:55:54 +0000538void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
539 assert(RecordSize % 8 == 0 && "Invalid record size!");
Mike Stump1eb44332009-09-09 15:08:12 +0000540
Anders Carlssonc1efe362009-07-27 14:55:54 +0000541 uint64_t RecordSizeInBytes = RecordSize / 8;
Anders Carlssonc2cc1d52009-07-28 17:56:36 +0000542 assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
Mike Stump1eb44332009-09-09 15:08:12 +0000543
Daniel Dunbar270e2032010-03-31 00:11:27 +0000544 uint64_t AlignedNextFieldOffset =
Anders Carlssonc2456822009-12-08 01:24:23 +0000545 llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
546
547 if (AlignedNextFieldOffset == RecordSizeInBytes) {
548 // We don't need any padding.
549 return;
550 }
Daniel Dunbar270e2032010-03-31 00:11:27 +0000551
Anders Carlssonc2cc1d52009-07-28 17:56:36 +0000552 unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
Anders Carlssonc1efe362009-07-27 14:55:54 +0000553 AppendBytes(NumPadBytes);
554}
555
Mike Stump1eb44332009-09-09 15:08:12 +0000556void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
Anders Carlsson45372a62009-07-23 03:17:50 +0000557 const llvm::Type *FieldTy) {
558 AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
559 getTypeAlignment(FieldTy));
Anders Carlsson728d7cd2009-07-24 02:45:50 +0000560
Daniel Dunbar9b28daf2010-04-12 21:01:28 +0000561 uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);
Anders Carlsson728d7cd2009-07-24 02:45:50 +0000562
Anders Carlsson45372a62009-07-23 03:17:50 +0000563 FieldTypes.push_back(FieldTy);
Anders Carlsson45372a62009-07-23 03:17:50 +0000564
Anders Carlssonc2cc1d52009-07-28 17:56:36 +0000565 NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
Anders Carlsson45372a62009-07-23 03:17:50 +0000566 BitsAvailableInLastField = 0;
567}
568
Mike Stump1eb44332009-09-09 15:08:12 +0000569void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
Anders Carlsson45372a62009-07-23 03:17:50 +0000570 unsigned FieldAlignment) {
Anders Carlsson45372a62009-07-23 03:17:50 +0000571 assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
572 "Incorrect field layout!");
Mike Stump1eb44332009-09-09 15:08:12 +0000573
Anders Carlsson45372a62009-07-23 03:17:50 +0000574 // Round up the field offset to the alignment of the field type.
Mike Stump1eb44332009-09-09 15:08:12 +0000575 uint64_t AlignedNextFieldOffsetInBytes =
Anders Carlsson45372a62009-07-23 03:17:50 +0000576 llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
577
578 if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
579 // Even with alignment, the field offset is not at the right place,
580 // insert padding.
581 uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
582
583 AppendBytes(PaddingInBytes);
584 }
585}
586
587void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
588 if (NumBytes == 0)
589 return;
Mike Stump1eb44332009-09-09 15:08:12 +0000590
Owen Anderson0032b272009-08-13 21:57:51 +0000591 const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
Anders Carlssonc1efe362009-07-27 14:55:54 +0000592 if (NumBytes > 1)
Anders Carlsson45372a62009-07-23 03:17:50 +0000593 Ty = llvm::ArrayType::get(Ty, NumBytes);
Mike Stump1eb44332009-09-09 15:08:12 +0000594
Anders Carlsson45372a62009-07-23 03:17:50 +0000595 // Append the padding field
Anders Carlssonc2cc1d52009-07-28 17:56:36 +0000596 AppendField(NextFieldOffsetInBytes, Ty);
Anders Carlsson45372a62009-07-23 03:17:50 +0000597}
598
599unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
Anders Carlsson4b5584b2009-07-23 17:24:40 +0000600 if (Packed)
Anders Carlsson45372a62009-07-23 03:17:50 +0000601 return 1;
Mike Stump1eb44332009-09-09 15:08:12 +0000602
Anders Carlsson45372a62009-07-23 03:17:50 +0000603 return Types.getTargetData().getABITypeAlignment(Ty);
604}
605
Anders Carlsson2c12d032010-02-02 05:17:25 +0000606void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
Anders Carlssonfc3eaa42009-08-23 01:25:01 +0000607 // This record already contains a member pointer.
Anders Carlsson2c12d032010-02-02 05:17:25 +0000608 if (ContainsPointerToDataMember)
Anders Carlssonfc3eaa42009-08-23 01:25:01 +0000609 return;
Mike Stump1eb44332009-09-09 15:08:12 +0000610
Anders Carlssonfc3eaa42009-08-23 01:25:01 +0000611 // Can only have member pointers if we're compiling C++.
612 if (!Types.getContext().getLangOptions().CPlusPlus)
613 return;
Mike Stump1eb44332009-09-09 15:08:12 +0000614
Anders Carlsson2c12d032010-02-02 05:17:25 +0000615 T = Types.getContext().getBaseElementType(T);
Mike Stump1eb44332009-09-09 15:08:12 +0000616
Anders Carlsson2c12d032010-02-02 05:17:25 +0000617 if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
618 if (!MPT->getPointeeType()->isFunctionType()) {
619 // We have a pointer to data member.
620 ContainsPointerToDataMember = true;
621 }
622 } else if (const RecordType *RT = T->getAs<RecordType>()) {
623 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
Daniel Dunbar270e2032010-03-31 00:11:27 +0000624
Anders Carlssona83fb4b2010-05-18 16:51:41 +0000625 return CheckForPointerToDataMember(RD);
Daniel Dunbar270e2032010-03-31 00:11:27 +0000626 }
Anders Carlssonfc3eaa42009-08-23 01:25:01 +0000627}
628
Anders Carlssona83fb4b2010-05-18 16:51:41 +0000629void
630CGRecordLayoutBuilder::CheckForPointerToDataMember(const CXXRecordDecl *RD) {
631 // This record already contains a member pointer.
632 if (ContainsPointerToDataMember)
633 return;
634
635 // FIXME: It would be better if there was a way to explicitly compute the
636 // record layout instead of converting to a type.
637 Types.ConvertTagDeclType(RD);
638
639 const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
640
641 if (Layout.containsPointerToDataMember())
642 ContainsPointerToDataMember = true;
643}
644
/// ComputeRecordLayout - Build the CGRecordLayout for record \p D: run the
/// layout builder, create the LLVM struct type, copy the builder's field /
/// base / bit-field tables into the result, optionally dump it, and (in
/// asserts builds) cross-check the LLVM layout against the AST layout.
/// The caller owns the returned object.
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  // Materialize the LLVM struct from the field types the builder collected;
  // Packed reflects whether the builder had to fall back to a packed layout.
  const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  // Walk AST fields and LLVM fields in parallel: i indexes the AST layout,
  // it tracks the corresponding FieldDecl.
  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    // NOTE: the inner loop's i deliberately shadows the outer AST field
    // index; it counts bit-field access components instead.
    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}
Daniel Dunbar93c62962010-04-12 18:14:18 +0000722
723void CGRecordLayout::print(llvm::raw_ostream &OS) const {
724 OS << "<CGRecordLayout\n";
725 OS << " LLVMType:" << *LLVMType << "\n";
726 OS << " ContainsPointerToDataMember:" << ContainsPointerToDataMember << "\n";
727 OS << " BitFields:[\n";
Daniel Dunbarad759532010-04-22 02:35:36 +0000728
729 // Print bit-field infos in declaration order.
730 std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
Daniel Dunbar93c62962010-04-12 18:14:18 +0000731 for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
732 it = BitFields.begin(), ie = BitFields.end();
733 it != ie; ++it) {
Daniel Dunbarad759532010-04-22 02:35:36 +0000734 const RecordDecl *RD = it->first->getParent();
735 unsigned Index = 0;
736 for (RecordDecl::field_iterator
737 it2 = RD->field_begin(); *it2 != it->first; ++it2)
738 ++Index;
739 BFIs.push_back(std::make_pair(Index, &it->second));
740 }
741 llvm::array_pod_sort(BFIs.begin(), BFIs.end());
742 for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
Daniel Dunbarab970f92010-04-13 20:58:55 +0000743 OS.indent(4);
Daniel Dunbarad759532010-04-22 02:35:36 +0000744 BFIs[i].second->print(OS);
Daniel Dunbar93c62962010-04-12 18:14:18 +0000745 OS << "\n";
746 }
Daniel Dunbarad759532010-04-22 02:35:36 +0000747
Daniel Dunbar93c62962010-04-12 18:14:18 +0000748 OS << "]>\n";
749}
750
/// dump - Print this record layout to standard error; convenience entry
/// point for use from a debugger.
void CGRecordLayout::dump() const {
  print(llvm::errs());
}
754
755void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
756 OS << "<CGBitFieldInfo";
Daniel Dunbar93c62962010-04-12 18:14:18 +0000757 OS << " Size:" << Size;
Daniel Dunbarab970f92010-04-13 20:58:55 +0000758 OS << " IsSigned:" << IsSigned << "\n";
759
760 OS.indent(4 + strlen("<CGBitFieldInfo"));
761 OS << " NumComponents:" << getNumComponents();
762 OS << " Components: [";
763 if (getNumComponents()) {
764 OS << "\n";
765 for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
766 const AccessInfo &AI = getComponent(i);
767 OS.indent(8);
768 OS << "<AccessInfo"
769 << " FieldIndex:" << AI.FieldIndex
770 << " FieldByteOffset:" << AI.FieldByteOffset
771 << " FieldBitStart:" << AI.FieldBitStart
772 << " AccessWidth:" << AI.AccessWidth << "\n";
773 OS.indent(8 + strlen("<AccessInfo"));
774 OS << " AccessAlignment:" << AI.AccessAlignment
775 << " TargetBitOffset:" << AI.TargetBitOffset
776 << " TargetBitWidth:" << AI.TargetBitWidth
777 << ">\n";
778 }
779 OS.indent(4);
780 }
781 OS << "]>";
Daniel Dunbar93c62962010-04-12 18:14:18 +0000782}
783
/// dump - Print this bit-field info to standard error; convenience entry
/// point for use from a debugger.
void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}