//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//

#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

namespace clang {
namespace CodeGen {

class CGRecordLayoutBuilder {
public:
  /// FieldTypes - Holds the LLVM types that the struct is created from.
  std::vector<const llvm::Type *> FieldTypes;

  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;

  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
  typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;

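  /// LLVMBaseInfo - Holds a non-virtual base and its corresponding LLVM field
  /// number.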
  typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
  llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;

  /// ContainsPointerToDataMember - Whether one of the fields in this record
  /// layout is a pointer to data member, or a struct that contains pointer to
  /// data member.
  bool ContainsPointerToDataMember;

  /// Packed - Whether the resulting LLVM struct will be packed or not.
  bool Packed;

private:
  CodeGenTypes &Types;

  /// Alignment - Contains the alignment of the RecordDecl.
  //
  // FIXME: This is not needed and should be removed.
  unsigned Alignment;

  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
  /// LLVM types.
  unsigned AlignmentAsLLVMStruct;

  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
  /// this will have the number of bits still available in the field.
  char BitsAvailableInLastField;

  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
  uint64_t NextFieldOffsetInBytes;

  /// LayoutUnionField - Will layout a field in a union and return the type
  /// that the field will have.
  const llvm::Type *LayoutUnionField(const FieldDecl *Field,
                                     const ASTRecordLayout &Layout);

  /// LayoutUnion - Will layout a union RecordDecl.
  void LayoutUnion(const RecordDecl *D);

  /// LayoutFields - try to layout all fields in the record decl.
  /// Returns false if the operation failed because the struct is not packed.
  bool LayoutFields(const RecordDecl *D);

  /// LayoutNonVirtualBase - layout a single non-virtual base.
  void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                            uint64_t BaseOffset);

  /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
  void LayoutNonVirtualBases(const CXXRecordDecl *RD,
                             const ASTRecordLayout &Layout);

  /// LayoutField - layout a single field. Returns false if the operation failed
  /// because the current struct is not packed.
  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);

  /// LayoutBitField - layout a single bit field.
  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);

  /// AppendField - Appends a field with the given offset and type.
  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);

  /// AppendPadding - Appends enough padding bytes so that the total
  /// struct size is a multiple of the field alignment.
  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);

  /// AppendBytes - Append a given number of bytes to the record.
  void AppendBytes(uint64_t NumBytes);

  /// AppendTailPadding - Append enough tail padding so that the type will have
  /// the passed size.
  void AppendTailPadding(uint64_t RecordSize);

  unsigned getTypeAlignment(const llvm::Type *Ty) const;

  /// CheckForPointerToDataMember - Check if the given type contains a pointer
  /// to data member.
  void CheckForPointerToDataMember(QualType T);
  void CheckForPointerToDataMember(const CXXRecordDecl *RD);

public:
  CGRecordLayoutBuilder(CodeGenTypes &Types)
    : ContainsPointerToDataMember(false), Packed(false), Types(Types),
      Alignment(0), AlignmentAsLLVMStruct(1),
      BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }

  /// Layout - Will layout a RecordDecl.
  void Layout(const RecordDecl *D);
};

}
}

void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
  Packed = D->hasAttr<PackedAttr>();

  if (D->isUnion()) {
    LayoutUnion(D);
    return;
  }

  if (LayoutFields(D))
    return;

  // We weren't able to layout the struct. Try again with a packed struct.
  Packed = true;
  AlignmentAsLLVMStruct = 1;
  NextFieldOffsetInBytes = 0;
  FieldTypes.clear();
  LLVMFields.clear();
  LLVMBitFields.clear();
  LLVMNonVirtualBases.clear();

  LayoutFields(D);
}

static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
                                          const FieldDecl *FD,
                                          uint64_t FieldOffset,
                                          uint64_t FieldSize) {
  const RecordDecl *RD = FD->getParent();
  const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
  uint64_t ContainingTypeSizeInBits = RL.getSize();
  unsigned ContainingTypeAlign = RL.getAlignment();

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(FD->getType());
  uint64_t TypeSizeInBytes = Types.getTargetData().getTypeAllocSize(Ty);
  uint64_t TypeSizeInBits = TypeSizeInBytes * 8;

  bool IsSigned = FD->getType()->isSignedIntegerType();

  if (FieldSize > TypeSizeInBits) {
    // We have a wide bit-field. The extra bits are only used for padding, so
    // if we have a bitfield of type T, with size N:
    //
    //   T t : N;
    //
    // We can just assume that it's:
    //
    //   T t : sizeof(T);
    //
    FieldSize = TypeSizeInBits;
  }

  // Compute the access components. The policy we use is to start by attempting
  // to access using the width of the bit-field type itself and to always access
  // at aligned indices of that type. If such an access would fail because it
  // extends past the bound of the type, then we reduce size to the next smaller
  // power of two and retry. The current algorithm assumes pow2 sized types,
  // although this is easy to fix.
  //
  // FIXME: This algorithm is wrong on big-endian systems, I think.
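  //
  // For illustration (a sketch, not a case taken from this file): with a
  // 32-bit int bit-field that begins at bit offset 8 of a 32-bit record, the
  // first candidate is a 32-bit access starting at bit 0; whenever a candidate
  // access would run past the end of the record, the width is halved (32, 16,
  // 8) until it fits, and any bits of the field it does not cover are picked
  // up by additional components on later iterations.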
  assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
  CGBitFieldInfo::AccessInfo Components[3];
  unsigned NumComponents = 0;
  unsigned AccessedTargetBits = 0;       // The number of target bits accessed.
  unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.

  // Round down from the field offset to find the first access position that is
  // at an aligned offset of the initial access type.
  uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);

  // Adjust initial access size to fit within record.
  while (AccessWidth > 8 &&
         AccessStart + AccessWidth > ContainingTypeSizeInBits) {
    AccessWidth >>= 1;
    AccessStart = FieldOffset - (FieldOffset % AccessWidth);
  }

  while (AccessedTargetBits < FieldSize) {
    // Check that we can access using a type of this size, without reading off
    // the end of the structure. This can occur with packed structures and
    // -fno-bitfield-type-align, for example.
    if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
      // If so, reduce access size to the next smaller power-of-two and retry.
      AccessWidth >>= 1;
      assert(AccessWidth >= 8 && "Cannot access under byte size!");
      continue;
    }

    // Otherwise, add an access component.

    // First, compute the bits inside this access which are part of the
    // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
    // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
    // in the target that we are reading.
    assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
    assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
    uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
    uint64_t AccessBitsInFieldSize =
      std::min(AccessWidth + AccessStart,
               FieldOffset + FieldSize) - AccessBitsInFieldStart;

    assert(NumComponents < 3 && "Unexpected number of components!");
    CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
    AI.FieldIndex = 0;
    // FIXME: We still follow the old access pattern of only using the field
    // byte offset. We should switch this once we fix the struct layout to be
    // pretty.
    AI.FieldByteOffset = AccessStart / 8;
    AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
    AI.AccessWidth = AccessWidth;
    AI.AccessAlignment = llvm::MinAlign(ContainingTypeAlign, AccessStart) / 8;
    AI.TargetBitOffset = AccessedTargetBits;
    AI.TargetBitWidth = AccessBitsInFieldSize;

    AccessStart += AccessWidth;
    AccessedTargetBits += AI.TargetBitWidth;
  }

  assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
  return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
}

void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
                                           uint64_t FieldOffset) {
  uint64_t FieldSize =
    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

  if (FieldSize == 0)
    return;

  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
  unsigned NumBytesToAppend;

  if (FieldOffset < NextFieldOffset) {
    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");

    // The bitfield begins in the previous bit-field.
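    // For example (an illustrative case, not taken from a caller): with
    // "int a : 3; int b : 6;", laying out 'a' appends one byte and leaves
    // BitsAvailableInLastField == 5, so 'b' (FieldOffset == 3) only needs
    // RoundUpToAlignment(6 - 5, 8) / 8 == 1 additional byte appended here.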
    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
  } else {
    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");

    // Append padding if necessary.
    AppendBytes((FieldOffset - NextFieldOffset) / 8);

    NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    assert(NumBytesToAppend && "No bytes to append!");
  }

  // Add the bit field info.
  LLVMBitFields.push_back(
    LLVMBitFieldInfo(D, ComputeBitFieldInfo(Types, D, FieldOffset, FieldSize)));

  AppendBytes(NumBytesToAppend);

  BitsAvailableInLastField =
    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}

bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
                                        uint64_t FieldOffset) {
  // If the field is packed, then we need a packed struct.
  if (!Packed && D->hasAttr<PackedAttr>())
    return false;

  if (D->isBitField()) {
    // We must use packed structs for unnamed bit fields since they
    // don't affect the struct alignment.
    if (!Packed && !D->getDeclName())
      return false;

    LayoutBitField(D, FieldOffset);
    return true;
  }

  // Check if we have a pointer to data member in this field.
  CheckForPointerToDataMember(D->getType());

  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
  uint64_t FieldOffsetInBytes = FieldOffset / 8;

  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
  unsigned TypeAlignment = getTypeAlignment(Ty);

  // If the type alignment is larger than the struct alignment, we must use
  // a packed struct.
  if (TypeAlignment > Alignment) {
    assert(!Packed && "Alignment is wrong even with packed struct!");
    return false;
  }

  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (const PragmaPackAttr *PPA = RD->getAttr<PragmaPackAttr>()) {
      if (PPA->getAlignment() != TypeAlignment * 8 && !Packed)
        return false;
    }
  }

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);

  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
    assert(!Packed && "Could not place field even with packed struct!");
    return false;
  }

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }

  // Now append the field.
  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
  AppendField(FieldOffsetInBytes, Ty);

  return true;
}

const llvm::Type *
CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
                                        const ASTRecordLayout &Layout) {
  if (Field->isBitField()) {
    uint64_t FieldSize =
      Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();

    // Ignore zero sized bit fields.
    if (FieldSize == 0)
      return 0;

    const llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
    unsigned NumBytesToAppend =
      llvm::RoundUpToAlignment(FieldSize, 8) / 8;

    if (NumBytesToAppend > 1)
      FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend);

    // Add the bit field info.
    LLVMBitFields.push_back(
      LLVMBitFieldInfo(Field, ComputeBitFieldInfo(Types, Field, 0, FieldSize)));
    return FieldTy;
  }

  // This is a regular union field.
  LLVMFields.push_back(LLVMFieldInfo(Field, 0));
  return Types.ConvertTypeForMemRecursive(Field->getType());
}

void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  const llvm::Type *Ty = 0;
  uint64_t Size = 0;
  unsigned Align = 0;

  bool HasOnlyZeroSizedBitFields = true;

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    assert(Layout.getFieldOffset(FieldNo) == 0 &&
           "Union field offset did not start at the beginning of record!");
    const llvm::Type *FieldTy = LayoutUnionField(*Field, Layout);

    if (!FieldTy)
      continue;

    HasOnlyZeroSizedBitFields = false;

    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);

    if (FieldAlign < Align)
      continue;

    if (FieldAlign > Align || FieldSize > Size) {
      Ty = FieldTy;
      Align = FieldAlign;
      Size = FieldSize;
    }
  }

  // Now add our field.
  if (Ty) {
    AppendField(0, Ty);

    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
      // We need a packed struct.
      Packed = true;
      Align = 1;
    }
  }
  if (!Align) {
    assert(HasOnlyZeroSizedBitFields &&
           "0-align record did not have all zero-sized bit-fields!");
    Align = 1;
  }

  // Append tail padding.
  if (Layout.getSize() / 8 > Size)
    AppendPadding(Layout.getSize() / 8, Align);
}

void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
                                                 uint64_t BaseOffset) {
  const ASTRecordLayout &Layout =
    Types.getContext().getASTRecordLayout(BaseDecl);

  uint64_t NonVirtualSize = Layout.getNonVirtualSize();

  if (BaseDecl->isEmpty()) {
    // FIXME: Lay out empty bases.
    return;
  }

  CheckForPointerToDataMember(BaseDecl);

  // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
  AppendPadding(BaseOffset / 8, 1);

  // Append the base field.
  LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));

  AppendBytes(NonVirtualSize / 8);
}

void
CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
                                             const ASTRecordLayout &Layout) {
  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();

  // Check if we need to add a vtable pointer.
  if (RD->isDynamicClass()) {
    if (!PrimaryBase) {
      const llvm::Type *FunctionType =
        llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
                                /*isVarArg=*/true);
      const llvm::Type *VTableTy = FunctionType->getPointerTo();

      assert(NextFieldOffsetInBytes == 0 &&
             "VTable pointer must come first!");
      AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
    } else {
      // FIXME: Handle a virtual primary base.
      if (!Layout.getPrimaryBaseWasVirtual())
        LayoutNonVirtualBase(PrimaryBase, 0);
    }
  }

  // Layout the non-virtual bases.
  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
       E = RD->bases_end(); I != E; ++I) {
    if (I->isVirtual())
      continue;

    const CXXRecordDecl *BaseDecl =
      cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());

    // We've already laid out the primary base.
    if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
      continue;

    LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
  }
}

bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
  assert(Alignment && "Did not set alignment!");

  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);

  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
    LayoutNonVirtualBases(RD, Layout);

  unsigned FieldNo = 0;

  for (RecordDecl::field_iterator Field = D->field_begin(),
       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
      assert(!Packed &&
             "Could not layout fields even with a packed LLVM struct!");
      return false;
    }
  }

  // Append tail padding if necessary.
  AppendTailPadding(Layout.getSize());

  return true;
}

void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
  assert(RecordSize % 8 == 0 && "Invalid record size!");

  uint64_t RecordSizeInBytes = RecordSize / 8;
  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");

  uint64_t AlignedNextFieldOffset =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);

  if (AlignedNextFieldOffset == RecordSizeInBytes) {
    // We don't need any padding.
    return;
  }

  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
  AppendBytes(NumPadBytes);
}

void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
                                        const llvm::Type *FieldTy) {
  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
                                   getTypeAlignment(FieldTy));

  uint64_t FieldSizeInBytes = Types.getTargetData().getTypeAllocSize(FieldTy);

  FieldTypes.push_back(FieldTy);

  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
  BitsAvailableInLastField = 0;
}

void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
                                          unsigned FieldAlignment) {
  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
         "Incorrect field layout!");

  // Round up the field offset to the alignment of the field type.
  uint64_t AlignedNextFieldOffsetInBytes =
    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);

  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
    // Even with alignment, the field offset is not at the right place,
    // insert padding.
    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;

    AppendBytes(PaddingInBytes);
  }
}

void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
  if (NumBytes == 0)
    return;

  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
  if (NumBytes > 1)
    Ty = llvm::ArrayType::get(Ty, NumBytes);

  // Append the padding field.
  AppendField(NextFieldOffsetInBytes, Ty);
}

unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
  if (Packed)
    return 1;

  return Types.getTargetData().getABITypeAlignment(Ty);
}

void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
  // This record already contains a member pointer.
  if (ContainsPointerToDataMember)
    return;

  // Can only have member pointers if we're compiling C++.
  if (!Types.getContext().getLangOptions().CPlusPlus)
    return;

  T = Types.getContext().getBaseElementType(T);

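  // For illustration: a field of type 'int S::*' is a pointer to data member
  // and should set the flag, while 'void (S::*)()' points to a member
  // function and should not.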
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
    if (!MPT->getPointeeType()->isFunctionType()) {
      // We have a pointer to data member.
      ContainsPointerToDataMember = true;
    }
  } else if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

    return CheckForPointerToDataMember(RD);
  }
}

void
CGRecordLayoutBuilder::CheckForPointerToDataMember(const CXXRecordDecl *RD) {
  // This record already contains a member pointer.
  if (ContainsPointerToDataMember)
    return;

  // FIXME: It would be better if there was a way to explicitly compute the
  // record layout instead of converting to a type.
  Types.ConvertTagDeclType(RD);

  const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);

  if (Layout.containsPointerToDataMember())
    ContainsPointerToDataMember = true;
}

CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
  CGRecordLayoutBuilder Builder(*this);

  Builder.Layout(D);

  const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
                                               Builder.FieldTypes,
                                               Builder.Packed);

  CGRecordLayout *RL =
    new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);

  // Add all the non-virtual base field numbers.
  RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
                                  Builder.LLVMNonVirtualBases.end());

  // Add all the field numbers.
  RL->FieldInfo.insert(Builder.LLVMFields.begin(),
                       Builder.LLVMFields.end());

  // Add bitfield info.
  RL->BitFields.insert(Builder.LLVMBitFields.begin(),
                       Builder.LLVMBitFields.end());

  // Dump the layout, if requested.
  if (getContext().getLangOptions().DumpRecordLayouts) {
    llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::errs() << "Record: ";
    D->dump();
    llvm::errs() << "\nLayout: ";
    RL->dump();
  }

#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  uint64_t TypeSizeInBits = getContext().getASTRecordLayout(D).getSize();
  assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  // Verify that the LLVM and AST field offsets agree.
  const llvm::StructType *ST =
    dyn_cast<llvm::StructType>(RL->getLLVMType());
  const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;

    // For non-bit-fields, just check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }

    // Ignore unnamed bit-fields.
    if (!FD->getDeclName())
      continue;

    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
      const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);

      // Verify that every component access is within the structure.
      uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
      uint64_t AccessBitOffset = FieldOffset + AI.FieldByteOffset * 8;
      assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
             "Invalid bit-field access (out of range)!");
    }
  }
#endif

  return RL;
}

void CGRecordLayout::print(llvm::raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << " LLVMType:" << *LLVMType << "\n";
  OS << " ContainsPointerToDataMember:" << ContainsPointerToDataMember << "\n";
  OS << " BitFields:[\n";

  // Print bit-field infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}

void CGRecordLayout::dump() const {
  print(llvm::errs());
}

void CGBitFieldInfo::print(llvm::raw_ostream &OS) const {
  OS << "<CGBitFieldInfo";
  OS << " Size:" << Size;
  OS << " IsSigned:" << IsSigned << "\n";

  OS.indent(4 + strlen("<CGBitFieldInfo"));
  OS << " NumComponents:" << getNumComponents();
  OS << " Components: [";
  if (getNumComponents()) {
    OS << "\n";
    for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
      const AccessInfo &AI = getComponent(i);
      OS.indent(8);
      OS << "<AccessInfo"
         << " FieldIndex:" << AI.FieldIndex
         << " FieldByteOffset:" << AI.FieldByteOffset
         << " FieldBitStart:" << AI.FieldBitStart
         << " AccessWidth:" << AI.AccessWidth << "\n";
      OS.indent(8 + strlen("<AccessInfo"));
      OS << " AccessAlignment:" << AI.AccessAlignment
         << " TargetBitOffset:" << AI.TargetBitOffset
         << " TargetBitWidth:" << AI.TargetBitWidth
         << ">\n";
    }
    OS.indent(4);
  }
  OS << "]>";
}

void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}