blob: a9d883b8579ca6a9eb6bfd1590302174b91899b1 [file] [log] [blame]
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001//===---- TargetABIInfo.cpp - Encapsulate target ABI details ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "ABIInfo.h"
16#include "CodeGenFunction.h"
Anders Carlsson15b73de2009-07-18 19:43:29 +000017#include "clang/AST/RecordLayout.h"
Anton Korobeynikov244360d2009-06-05 22:08:42 +000018#include "llvm/Type.h"
Daniel Dunbare3532f82009-08-24 08:52:16 +000019#include "llvm/ADT/Triple.h"
Torok Edwindb714922009-08-24 13:25:12 +000020#include <cstdio>
Anton Korobeynikov244360d2009-06-05 22:08:42 +000021
22using namespace clang;
23using namespace CodeGen;
24
25ABIInfo::~ABIInfo() {}
26
27void ABIArgInfo::dump() const {
28 fprintf(stderr, "(ABIArgInfo Kind=");
29 switch (TheKind) {
30 case Direct:
31 fprintf(stderr, "Direct");
32 break;
Anton Korobeynikov18adbf52009-06-06 09:36:29 +000033 case Extend:
34 fprintf(stderr, "Extend");
35 break;
Anton Korobeynikov244360d2009-06-05 22:08:42 +000036 case Ignore:
37 fprintf(stderr, "Ignore");
38 break;
39 case Coerce:
40 fprintf(stderr, "Coerce Type=");
41 getCoerceToType()->print(llvm::errs());
42 break;
43 case Indirect:
44 fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
45 break;
46 case Expand:
47 fprintf(stderr, "Expand");
48 break;
49 }
50 fprintf(stderr, ")\n");
51}
52
Daniel Dunbar626f1d82009-09-13 08:03:58 +000053static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
Anton Korobeynikov244360d2009-06-05 22:08:42 +000054
55/// isEmptyField - Return true iff a the field is "empty", that is it
56/// is an unnamed bit-field or an (array of) empty record(s).
Daniel Dunbar626f1d82009-09-13 08:03:58 +000057static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
58 bool AllowArrays) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +000059 if (FD->isUnnamedBitfield())
60 return true;
61
62 QualType FT = FD->getType();
Anton Korobeynikov244360d2009-06-05 22:08:42 +000063
Daniel Dunbar626f1d82009-09-13 08:03:58 +000064 // Constant arrays of empty records count as empty, strip them off.
65 if (AllowArrays)
66 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
67 FT = AT->getElementType();
68
69 return isEmptyRecord(Context, FT, AllowArrays);
Anton Korobeynikov244360d2009-06-05 22:08:42 +000070}
71
72/// isEmptyRecord - Return true iff a structure contains only empty
73/// fields. Note that a structure with a flexible array member is not
74/// considered empty.
Daniel Dunbar626f1d82009-09-13 08:03:58 +000075static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
Ted Kremenekc23c7e62009-07-29 21:53:49 +000076 const RecordType *RT = T->getAs<RecordType>();
Anton Korobeynikov244360d2009-06-05 22:08:42 +000077 if (!RT)
78 return 0;
79 const RecordDecl *RD = RT->getDecl();
80 if (RD->hasFlexibleArrayMember())
81 return false;
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +000082 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
83 i != e; ++i)
Daniel Dunbar626f1d82009-09-13 08:03:58 +000084 if (!isEmptyField(Context, *i, AllowArrays))
Anton Korobeynikov244360d2009-06-05 22:08:42 +000085 return false;
86 return true;
87}
88
Anders Carlsson20759ad2009-09-16 15:53:40 +000089/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
90/// a non-trivial destructor or a non-trivial copy constructor.
91static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
92 const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
93 if (!RD)
94 return false;
95
96 return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
97}
98
99/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
100/// a record type with either a non-trivial destructor or a non-trivial copy
101/// constructor.
102static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
103 const RecordType *RT = T->getAs<RecordType>();
104 if (!RT)
105 return false;
106
107 return hasNonTrivialDestructorOrCopyConstructor(RT);
108}
109
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000110/// isSingleElementStruct - Determine if a structure is a "single
111/// element struct", i.e. it has exactly one non-empty field or
112/// exactly one field which is itself a single element
113/// struct. Structures with flexible array members are never
114/// considered single element structs.
115///
116/// \return The field declaration for the single non-empty field, if
117/// it exists.
118static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
119 const RecordType *RT = T->getAsStructureType();
120 if (!RT)
121 return 0;
122
123 const RecordDecl *RD = RT->getDecl();
124 if (RD->hasFlexibleArrayMember())
125 return 0;
126
127 const Type *Found = 0;
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +0000128 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
129 i != e; ++i) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000130 const FieldDecl *FD = *i;
131 QualType FT = FD->getType();
132
133 // Ignore empty fields.
Daniel Dunbar626f1d82009-09-13 08:03:58 +0000134 if (isEmptyField(Context, FD, true))
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000135 continue;
136
137 // If we already found an element then this isn't a single-element
138 // struct.
139 if (Found)
140 return 0;
141
142 // Treat single element arrays as the element.
143 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
144 if (AT->getSize().getZExtValue() != 1)
145 break;
146 FT = AT->getElementType();
147 }
148
149 if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
150 Found = FT.getTypePtr();
151 } else {
152 Found = isSingleElementStruct(FT, Context);
153 if (!Found)
154 return 0;
155 }
156 }
157
158 return Found;
159}
160
161static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
162 if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
163 return false;
164
165 uint64_t Size = Context.getTypeSize(Ty);
166 return Size == 32 || Size == 64;
167}
168
169static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
170 ASTContext &Context) {
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +0000171 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
172 i != e; ++i) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000173 const FieldDecl *FD = *i;
174
175 if (!is32Or64BitBasicType(FD->getType(), Context))
176 return false;
177
178 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
179 // how to expand them yet, and the predicate for telling if a bitfield still
180 // counts as "basic" is more complicated than what we were doing previously.
181 if (FD->isBitField())
182 return false;
183 }
184
185 return true;
186}
187
Eli Friedman3192cc82009-06-13 21:37:10 +0000188static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +0000189 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
190 i != e; ++i) {
Eli Friedman3192cc82009-06-13 21:37:10 +0000191 const FieldDecl *FD = *i;
192
193 if (FD->getType()->isVectorType() &&
194 Context.getTypeSize(FD->getType()) >= 128)
195 return true;
196
Ted Kremenekc23c7e62009-07-29 21:53:49 +0000197 if (const RecordType* RT = FD->getType()->getAs<RecordType>())
Eli Friedman3192cc82009-06-13 21:37:10 +0000198 if (typeContainsSSEVector(RT->getDecl(), Context))
199 return true;
200 }
201
202 return false;
203}
204
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000205namespace {
206/// DefaultABIInfo - The default implementation for ABI specific
207/// details. This implementation provides information which results in
208/// self-consistent and sensible LLVM IR generation, but does not
209/// conform to any particular ABI.
210class DefaultABIInfo : public ABIInfo {
211 ABIArgInfo classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +0000212 ASTContext &Context,
213 llvm::LLVMContext &VMContext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000214
215 ABIArgInfo classifyArgumentType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +0000216 ASTContext &Context,
217 llvm::LLVMContext &VMContext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000218
Owen Anderson170229f2009-07-14 23:10:40 +0000219 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
220 llvm::LLVMContext &VMContext) const {
221 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
222 VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000223 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
224 it != ie; ++it)
Owen Anderson170229f2009-07-14 23:10:40 +0000225 it->info = classifyArgumentType(it->type, Context, VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000226 }
227
228 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
229 CodeGenFunction &CGF) const;
230};
231
232/// X86_32ABIInfo - The X86-32 ABI information.
233class X86_32ABIInfo : public ABIInfo {
234 ASTContext &Context;
David Chisnallde3a0692009-08-17 23:08:21 +0000235 bool IsDarwinVectorABI;
236 bool IsSmallStructInRegABI;
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000237
238 static bool isRegisterSize(unsigned Size) {
239 return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
240 }
241
242 static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
243
Eli Friedman3192cc82009-06-13 21:37:10 +0000244 static unsigned getIndirectArgumentAlignment(QualType Ty,
245 ASTContext &Context);
246
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000247public:
248 ABIArgInfo classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +0000249 ASTContext &Context,
250 llvm::LLVMContext &VMContext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000251
252 ABIArgInfo classifyArgumentType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +0000253 ASTContext &Context,
254 llvm::LLVMContext &VMContext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000255
Owen Anderson170229f2009-07-14 23:10:40 +0000256 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
257 llvm::LLVMContext &VMContext) const {
258 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
259 VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000260 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
261 it != ie; ++it)
Owen Anderson170229f2009-07-14 23:10:40 +0000262 it->info = classifyArgumentType(it->type, Context, VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000263 }
264
265 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
266 CodeGenFunction &CGF) const;
267
David Chisnallde3a0692009-08-17 23:08:21 +0000268 X86_32ABIInfo(ASTContext &Context, bool d, bool p)
Mike Stump11289f42009-09-09 15:08:12 +0000269 : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
David Chisnallde3a0692009-08-17 23:08:21 +0000270 IsSmallStructInRegABI(p) {}
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000271};
272}
273
274
275/// shouldReturnTypeInRegister - Determine if the given type should be
276/// passed in a register (for the Darwin ABI).
277bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
278 ASTContext &Context) {
279 uint64_t Size = Context.getTypeSize(Ty);
280
281 // Type must be register sized.
282 if (!isRegisterSize(Size))
283 return false;
284
285 if (Ty->isVectorType()) {
286 // 64- and 128- bit vectors inside structures are not returned in
287 // registers.
288 if (Size == 64 || Size == 128)
289 return false;
290
291 return true;
292 }
293
294 // If this is a builtin, pointer, or complex type, it is ok.
295 if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
296 return true;
297
298 // Arrays are treated like records.
299 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
300 return shouldReturnTypeInRegister(AT->getElementType(), Context);
301
302 // Otherwise, it must be a record type.
Ted Kremenekc23c7e62009-07-29 21:53:49 +0000303 const RecordType *RT = Ty->getAs<RecordType>();
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000304 if (!RT) return false;
305
306 // Structure types are passed in register if all fields would be
307 // passed in a register.
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +0000308 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
309 e = RT->getDecl()->field_end(); i != e; ++i) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000310 const FieldDecl *FD = *i;
311
312 // Empty fields are ignored.
Daniel Dunbar626f1d82009-09-13 08:03:58 +0000313 if (isEmptyField(Context, FD, true))
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000314 continue;
315
316 // Check fields recursively.
317 if (!shouldReturnTypeInRegister(FD->getType(), Context))
318 return false;
319 }
320
321 return true;
322}
323
324ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +0000325 ASTContext &Context,
326 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000327 if (RetTy->isVoidType()) {
328 return ABIArgInfo::getIgnore();
329 } else if (const VectorType *VT = RetTy->getAsVectorType()) {
330 // On Darwin, some vectors are returned in registers.
David Chisnallde3a0692009-08-17 23:08:21 +0000331 if (IsDarwinVectorABI) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000332 uint64_t Size = Context.getTypeSize(RetTy);
333
334 // 128-bit vectors are a special case; they are returned in
335 // registers and we need to make sure to pick a type the LLVM
336 // backend will like.
337 if (Size == 128)
Owen Anderson41a75022009-08-13 21:57:51 +0000338 return ABIArgInfo::getCoerce(llvm::VectorType::get(
339 llvm::Type::getInt64Ty(VMContext), 2));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000340
341 // Always return in register if it fits in a general purpose
342 // register, or if it is 64 bits and has a single element.
343 if ((Size == 8 || Size == 16 || Size == 32) ||
344 (Size == 64 && VT->getNumElements() == 1))
Owen Anderson41a75022009-08-13 21:57:51 +0000345 return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000346
347 return ABIArgInfo::getIndirect(0);
348 }
349
350 return ABIArgInfo::getDirect();
351 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
352 // Structures with flexible arrays are always indirect.
353 if (const RecordType *RT = RetTy->getAsStructureType())
354 if (RT->getDecl()->hasFlexibleArrayMember())
355 return ABIArgInfo::getIndirect(0);
356
David Chisnallde3a0692009-08-17 23:08:21 +0000357 // If specified, structs and unions are always indirect.
358 if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000359 return ABIArgInfo::getIndirect(0);
360
361 // Classify "single element" structs as their element type.
362 if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
363 if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
364 if (BT->isIntegerType()) {
365 // We need to use the size of the structure, padding
366 // bit-fields can adjust that to be larger than the single
367 // element type.
368 uint64_t Size = Context.getTypeSize(RetTy);
Owen Anderson170229f2009-07-14 23:10:40 +0000369 return ABIArgInfo::getCoerce(
Owen Anderson41a75022009-08-13 21:57:51 +0000370 llvm::IntegerType::get(VMContext, (unsigned) Size));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000371 } else if (BT->getKind() == BuiltinType::Float) {
372 assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
373 "Unexpect single element structure size!");
Owen Anderson41a75022009-08-13 21:57:51 +0000374 return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000375 } else if (BT->getKind() == BuiltinType::Double) {
376 assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
377 "Unexpect single element structure size!");
Owen Anderson41a75022009-08-13 21:57:51 +0000378 return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000379 }
380 } else if (SeltTy->isPointerType()) {
381 // FIXME: It would be really nice if this could come out as the proper
382 // pointer type.
383 llvm::Type *PtrTy =
Owen Anderson41a75022009-08-13 21:57:51 +0000384 llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000385 return ABIArgInfo::getCoerce(PtrTy);
386 } else if (SeltTy->isVectorType()) {
387 // 64- and 128-bit vectors are never returned in a
388 // register when inside a structure.
389 uint64_t Size = Context.getTypeSize(RetTy);
390 if (Size == 64 || Size == 128)
391 return ABIArgInfo::getIndirect(0);
392
Owen Anderson170229f2009-07-14 23:10:40 +0000393 return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000394 }
395 }
396
397 // Small structures which are register sized are generally returned
398 // in a register.
399 if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
400 uint64_t Size = Context.getTypeSize(RetTy);
Owen Anderson41a75022009-08-13 21:57:51 +0000401 return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000402 }
403
404 return ABIArgInfo::getIndirect(0);
405 } else {
Anton Korobeynikov18adbf52009-06-06 09:36:29 +0000406 return (RetTy->isPromotableIntegerType() ?
407 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000408 }
409}
410
Eli Friedman3192cc82009-06-13 21:37:10 +0000411unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
412 ASTContext &Context) {
413 unsigned Align = Context.getTypeAlign(Ty);
414 if (Align < 128) return 0;
Ted Kremenekc23c7e62009-07-29 21:53:49 +0000415 if (const RecordType* RT = Ty->getAs<RecordType>())
Eli Friedman3192cc82009-06-13 21:37:10 +0000416 if (typeContainsSSEVector(RT->getDecl(), Context))
417 return 16;
418 return 0;
419}
420
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000421ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
Owen Anderson170229f2009-07-14 23:10:40 +0000422 ASTContext &Context,
423 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000424 // FIXME: Set alignment on indirect arguments.
425 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
426 // Structures with flexible arrays are always indirect.
427 if (const RecordType *RT = Ty->getAsStructureType())
428 if (RT->getDecl()->hasFlexibleArrayMember())
Mike Stump11289f42009-09-09 15:08:12 +0000429 return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
Eli Friedman3192cc82009-06-13 21:37:10 +0000430 Context));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000431
432 // Ignore empty structs.
Eli Friedman3192cc82009-06-13 21:37:10 +0000433 if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000434 return ABIArgInfo::getIgnore();
435
436 // Expand structs with size <= 128-bits which consist only of
437 // basic types (int, long long, float, double, xxx*). This is
438 // non-recursive and does not ignore empty fields.
439 if (const RecordType *RT = Ty->getAsStructureType()) {
440 if (Context.getTypeSize(Ty) <= 4*32 &&
441 areAllFields32Or64BitBasicType(RT->getDecl(), Context))
442 return ABIArgInfo::getExpand();
443 }
444
Eli Friedman3192cc82009-06-13 21:37:10 +0000445 return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000446 } else {
Anton Korobeynikov18adbf52009-06-06 09:36:29 +0000447 return (Ty->isPromotableIntegerType() ?
448 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000449 }
450}
451
452llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
453 CodeGenFunction &CGF) const {
Owen Anderson41a75022009-08-13 21:57:51 +0000454 const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
Owen Anderson9793f0e2009-07-29 22:16:19 +0000455 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000456
457 CGBuilderTy &Builder = CGF.Builder;
458 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
459 "ap");
460 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
461 llvm::Type *PTy =
Owen Anderson9793f0e2009-07-29 22:16:19 +0000462 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000463 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
464
465 uint64_t Offset =
466 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
467 llvm::Value *NextAddr =
Owen Anderson41a75022009-08-13 21:57:51 +0000468 Builder.CreateGEP(Addr, llvm::ConstantInt::get(
469 llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000470 "ap.next");
471 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
472
473 return AddrTyped;
474}
475
476namespace {
477/// X86_64ABIInfo - The X86_64 ABI information.
478class X86_64ABIInfo : public ABIInfo {
479 enum Class {
480 Integer = 0,
481 SSE,
482 SSEUp,
483 X87,
484 X87Up,
485 ComplexX87,
486 NoClass,
487 Memory
488 };
489
490 /// merge - Implement the X86_64 ABI merging algorithm.
491 ///
492 /// Merge an accumulating classification \arg Accum with a field
493 /// classification \arg Field.
494 ///
495 /// \param Accum - The accumulating classification. This should
496 /// always be either NoClass or the result of a previous merge
497 /// call. In addition, this should never be Memory (the caller
498 /// should just return Memory for the aggregate).
499 Class merge(Class Accum, Class Field) const;
500
501 /// classify - Determine the x86_64 register classes in which the
502 /// given type T should be passed.
503 ///
504 /// \param Lo - The classification for the parts of the type
505 /// residing in the low word of the containing object.
506 ///
507 /// \param Hi - The classification for the parts of the type
508 /// residing in the high word of the containing object.
509 ///
510 /// \param OffsetBase - The bit offset of this type in the
511 /// containing object. Some parameters are classified different
512 /// depending on whether they straddle an eightbyte boundary.
513 ///
514 /// If a word is unused its result will be NoClass; if a type should
515 /// be passed in Memory then at least the classification of \arg Lo
516 /// will be Memory.
517 ///
518 /// The \arg Lo class will be NoClass iff the argument is ignored.
519 ///
520 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
521 /// also be ComplexX87.
522 void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
523 Class &Lo, Class &Hi) const;
524
525 /// getCoerceResult - Given a source type \arg Ty and an LLVM type
526 /// to coerce to, chose the best way to pass Ty in the same place
527 /// that \arg CoerceTo would be passed, but while keeping the
528 /// emitted code as simple as possible.
529 ///
530 /// FIXME: Note, this should be cleaned up to just take an enumeration of all
531 /// the ways we might want to pass things, instead of constructing an LLVM
532 /// type. This makes this code more explicit, and it makes it clearer that we
533 /// are also doing this for correctness in the case of passing scalar types.
534 ABIArgInfo getCoerceResult(QualType Ty,
535 const llvm::Type *CoerceTo,
536 ASTContext &Context) const;
537
538 /// getIndirectResult - Give a source type \arg Ty, return a suitable result
539 /// such that the argument will be passed in memory.
540 ABIArgInfo getIndirectResult(QualType Ty,
541 ASTContext &Context) const;
542
543 ABIArgInfo classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +0000544 ASTContext &Context,
545 llvm::LLVMContext &VMContext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000546
547 ABIArgInfo classifyArgumentType(QualType Ty,
548 ASTContext &Context,
Owen Anderson170229f2009-07-14 23:10:40 +0000549 llvm::LLVMContext &VMContext,
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000550 unsigned &neededInt,
551 unsigned &neededSSE) const;
552
553public:
Owen Anderson170229f2009-07-14 23:10:40 +0000554 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
555 llvm::LLVMContext &VMContext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000556
557 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
558 CodeGenFunction &CGF) const;
559};
560}
561
562X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
563 Class Field) const {
564 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
565 // classified recursively so that always two fields are
566 // considered. The resulting class is calculated according to
567 // the classes of the fields in the eightbyte:
568 //
569 // (a) If both classes are equal, this is the resulting class.
570 //
571 // (b) If one of the classes is NO_CLASS, the resulting class is
572 // the other class.
573 //
574 // (c) If one of the classes is MEMORY, the result is the MEMORY
575 // class.
576 //
577 // (d) If one of the classes is INTEGER, the result is the
578 // INTEGER.
579 //
580 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
581 // MEMORY is used as class.
582 //
583 // (f) Otherwise class SSE is used.
584
585 // Accum should never be memory (we should have returned) or
586 // ComplexX87 (because this cannot be passed in a structure).
587 assert((Accum != Memory && Accum != ComplexX87) &&
588 "Invalid accumulated classification during merge.");
589 if (Accum == Field || Field == NoClass)
590 return Accum;
591 else if (Field == Memory)
592 return Memory;
593 else if (Accum == NoClass)
594 return Field;
595 else if (Accum == Integer || Field == Integer)
596 return Integer;
597 else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
598 Accum == X87 || Accum == X87Up)
599 return Memory;
600 else
601 return SSE;
602}
603
604void X86_64ABIInfo::classify(QualType Ty,
605 ASTContext &Context,
606 uint64_t OffsetBase,
607 Class &Lo, Class &Hi) const {
608 // FIXME: This code can be simplified by introducing a simple value class for
609 // Class pairs with appropriate constructor methods for the various
610 // situations.
611
612 // FIXME: Some of the split computations are wrong; unaligned vectors
613 // shouldn't be passed in registers for example, so there is no chance they
614 // can straddle an eightbyte. Verify & simplify.
615
616 Lo = Hi = NoClass;
617
618 Class &Current = OffsetBase < 64 ? Lo : Hi;
619 Current = Memory;
620
621 if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
622 BuiltinType::Kind k = BT->getKind();
623
624 if (k == BuiltinType::Void) {
625 Current = NoClass;
626 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
627 Lo = Integer;
628 Hi = Integer;
629 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
630 Current = Integer;
631 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
632 Current = SSE;
633 } else if (k == BuiltinType::LongDouble) {
634 Lo = X87;
635 Hi = X87Up;
636 }
637 // FIXME: _Decimal32 and _Decimal64 are SSE.
638 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
639 } else if (const EnumType *ET = Ty->getAsEnumType()) {
640 // Classify the underlying integer type.
641 classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
642 } else if (Ty->hasPointerRepresentation()) {
643 Current = Integer;
644 } else if (const VectorType *VT = Ty->getAsVectorType()) {
645 uint64_t Size = Context.getTypeSize(VT);
646 if (Size == 32) {
647 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
648 // float> as integer.
649 Current = Integer;
650
651 // If this type crosses an eightbyte boundary, it should be
652 // split.
653 uint64_t EB_Real = (OffsetBase) / 64;
654 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
655 if (EB_Real != EB_Imag)
656 Hi = Lo;
657 } else if (Size == 64) {
658 // gcc passes <1 x double> in memory. :(
659 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
660 return;
661
662 // gcc passes <1 x long long> as INTEGER.
663 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
664 Current = Integer;
665 else
666 Current = SSE;
667
668 // If this type crosses an eightbyte boundary, it should be
669 // split.
670 if (OffsetBase && OffsetBase != 64)
671 Hi = Lo;
672 } else if (Size == 128) {
673 Lo = SSE;
674 Hi = SSEUp;
675 }
676 } else if (const ComplexType *CT = Ty->getAsComplexType()) {
677 QualType ET = Context.getCanonicalType(CT->getElementType());
678
679 uint64_t Size = Context.getTypeSize(Ty);
680 if (ET->isIntegralType()) {
681 if (Size <= 64)
682 Current = Integer;
683 else if (Size <= 128)
684 Lo = Hi = Integer;
685 } else if (ET == Context.FloatTy)
686 Current = SSE;
687 else if (ET == Context.DoubleTy)
688 Lo = Hi = SSE;
689 else if (ET == Context.LongDoubleTy)
690 Current = ComplexX87;
691
692 // If this complex type crosses an eightbyte boundary then it
693 // should be split.
694 uint64_t EB_Real = (OffsetBase) / 64;
695 uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
696 if (Hi == NoClass && EB_Real != EB_Imag)
697 Hi = Lo;
698 } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
699 // Arrays are treated like structures.
700
701 uint64_t Size = Context.getTypeSize(Ty);
702
703 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
704 // than two eightbytes, ..., it has class MEMORY.
705 if (Size > 128)
706 return;
707
708 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
709 // fields, it has class MEMORY.
710 //
711 // Only need to check alignment of array base.
712 if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
713 return;
714
715 // Otherwise implement simplified merge. We could be smarter about
716 // this, but it isn't worth it and would be harder to verify.
717 Current = NoClass;
718 uint64_t EltSize = Context.getTypeSize(AT->getElementType());
719 uint64_t ArraySize = AT->getSize().getZExtValue();
720 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
721 Class FieldLo, FieldHi;
722 classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
723 Lo = merge(Lo, FieldLo);
724 Hi = merge(Hi, FieldHi);
725 if (Lo == Memory || Hi == Memory)
726 break;
727 }
728
729 // Do post merger cleanup (see below). Only case we worry about is Memory.
730 if (Hi == Memory)
731 Lo = Memory;
732 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
Ted Kremenekc23c7e62009-07-29 21:53:49 +0000733 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000734 uint64_t Size = Context.getTypeSize(Ty);
735
736 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
737 // than two eightbytes, ..., it has class MEMORY.
738 if (Size > 128)
739 return;
740
Anders Carlsson20759ad2009-09-16 15:53:40 +0000741 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
742 // copy constructor or a non-trivial destructor, it is passed by invisible
743 // reference.
744 if (hasNonTrivialDestructorOrCopyConstructor(RT))
745 return;
746
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000747 const RecordDecl *RD = RT->getDecl();
748
749 // Assume variable sized types are passed in memory.
750 if (RD->hasFlexibleArrayMember())
751 return;
752
753 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
754
755 // Reset Lo class, this will be recomputed.
756 Current = NoClass;
757 unsigned idx = 0;
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +0000758 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
759 i != e; ++i, ++idx) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000760 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
761 bool BitField = i->isBitField();
762
763 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
764 // fields, it has class MEMORY.
765 //
766 // Note, skip this test for bit-fields, see below.
767 if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
768 Lo = Memory;
769 return;
770 }
771
772 // Classify this field.
773 //
774 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
775 // exceeds a single eightbyte, each is classified
776 // separately. Each eightbyte gets initialized to class
777 // NO_CLASS.
778 Class FieldLo, FieldHi;
779
780 // Bit-fields require special handling, they do not force the
781 // structure to be passed in memory even if unaligned, and
782 // therefore they can straddle an eightbyte.
783 if (BitField) {
784 // Ignore padding bit-fields.
785 if (i->isUnnamedBitfield())
786 continue;
787
788 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
789 uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
790
791 uint64_t EB_Lo = Offset / 64;
792 uint64_t EB_Hi = (Offset + Size - 1) / 64;
793 FieldLo = FieldHi = NoClass;
794 if (EB_Lo) {
795 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
796 FieldLo = NoClass;
797 FieldHi = Integer;
798 } else {
799 FieldLo = Integer;
800 FieldHi = EB_Hi ? Integer : NoClass;
801 }
802 } else
803 classify(i->getType(), Context, Offset, FieldLo, FieldHi);
804 Lo = merge(Lo, FieldLo);
805 Hi = merge(Hi, FieldHi);
806 if (Lo == Memory || Hi == Memory)
807 break;
808 }
809
810 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
811 //
812 // (a) If one of the classes is MEMORY, the whole argument is
813 // passed in memory.
814 //
815 // (b) If SSEUP is not preceeded by SSE, it is converted to SSE.
816
817 // The first of these conditions is guaranteed by how we implement
818 // the merge (just bail).
819 //
820 // The second condition occurs in the case of unions; for example
821 // union { _Complex double; unsigned; }.
822 if (Hi == Memory)
823 Lo = Memory;
824 if (Hi == SSEUp && Lo != SSE)
825 Hi = SSE;
826 }
827}
828
829ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
830 const llvm::Type *CoerceTo,
831 ASTContext &Context) const {
Owen Anderson41a75022009-08-13 21:57:51 +0000832 if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000833 // Integer and pointer types will end up in a general purpose
834 // register.
835 if (Ty->isIntegralType() || Ty->isPointerType())
Anton Korobeynikov18adbf52009-06-06 09:36:29 +0000836 return (Ty->isPromotableIntegerType() ?
837 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Owen Anderson41a75022009-08-13 21:57:51 +0000838 } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000839 // FIXME: It would probably be better to make CGFunctionInfo only map using
840 // canonical types than to canonize here.
841 QualType CTy = Context.getCanonicalType(Ty);
842
843 // Float and double end up in a single SSE reg.
844 if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
845 return ABIArgInfo::getDirect();
846
847 }
848
849 return ABIArgInfo::getCoerce(CoerceTo);
850}
851
852ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
853 ASTContext &Context) const {
854 // If this is a scalar LLVM value then assume LLVM will pass it in the right
855 // place naturally.
856 if (!CodeGenFunction::hasAggregateLLVMType(Ty))
Anton Korobeynikov18adbf52009-06-06 09:36:29 +0000857 return (Ty->isPromotableIntegerType() ?
858 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000859
Anders Carlsson20759ad2009-09-16 15:53:40 +0000860 bool ByVal = !isRecordWithNonTrivialDestructorOrCopyConstructor(Ty);
861
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000862 // FIXME: Set alignment correctly.
Anders Carlsson20759ad2009-09-16 15:53:40 +0000863 return ABIArgInfo::getIndirect(0, ByVal);
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000864}
865
/// classifyReturnType - Determine how a value of type \arg RetTy is returned
/// under the AMD64 ABI: ignored, returned indirectly via a hidden sret
/// argument, or coerced into at most two eightbyte-sized IR values (built up
/// from the Lo/Hi classes computed by classify()).
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context,
                                             llvm::LLVMContext &VMContext) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  // ResType accumulates the IR type for the low eightbyte; the Hi switch
  // below may wrap it in a struct to add the high eightbyte.
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectResult(RetTy, Context);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::getInt64Ty(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::getDoubleTy(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext),
                                    llvm::Type::getX86_FP80Ty(VMContext),
                                    NULL);
    break;
  }

  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the upper half of the last used SSE register.
  //
  // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(VMContext, ResType,
                                      llvm::Type::getDoubleTy(VMContext), NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}
962
/// classifyArgumentType - Determine how an argument of type \arg Ty is passed
/// under the AMD64 ABI. Reports through \arg neededInt / \arg neededSSE how
/// many general purpose and SSE registers the classification consumes (both
/// zero when the argument is passed in memory).
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               llvm::LLVMContext &VMContext,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  // ResType accumulates the IR type for the low eightbyte; the Hi switch
  // below may wrap it in a struct to append the high eightbyte.
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return getIndirectResult(Ty, Context);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::getInt64Ty(VMContext);
    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used, the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::getDoubleTy(VMContext);
    break;
  }

  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceded by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    ++neededInt;
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;
  }

  return getCoerceResult(Ty, ResType, Context);
}
1051
Owen Anderson170229f2009-07-14 23:10:40 +00001052void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1053 llvm::LLVMContext &VMContext) const {
1054 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
1055 Context, VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001056
1057 // Keep track of the number of assigned registers.
1058 unsigned freeIntRegs = 6, freeSSERegs = 8;
1059
1060 // If the return value is indirect, then the hidden argument is consuming one
1061 // integer register.
1062 if (FI.getReturnInfo().isIndirect())
1063 --freeIntRegs;
1064
1065 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1066 // get assigned (in left-to-right order) for passing as follows...
1067 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1068 it != ie; ++it) {
1069 unsigned neededInt, neededSSE;
Mike Stump11289f42009-09-09 15:08:12 +00001070 it->info = classifyArgumentType(it->type, Context, VMContext,
Owen Anderson170229f2009-07-14 23:10:40 +00001071 neededInt, neededSSE);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001072
1073 // AMD64-ABI 3.2.3p3: If there are no registers available for any
1074 // eightbyte of an argument, the whole argument is passed on the
1075 // stack. If registers have already been assigned for some
1076 // eightbytes of such an argument, the assignments get reverted.
1077 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
1078 freeIntRegs -= neededInt;
1079 freeSSERegs -= neededSSE;
1080 } else {
1081 it->info = getIndirectResult(it->type, Context);
1082 }
1083 }
1084}
1085
/// EmitVAArgFromMemory - Emit the IR that fetches a va_arg value of type
/// \arg Ty from the overflow argument area of an AMD64 va_list, advancing
/// l->overflow_arg_area past the consumed slot. Returns the address of the
/// fetched value, cast to a pointer to the memory type of \arg Ty.
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  // VAListAddr points at the va_list struct; field 2 is overflow_arg_area.
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                 llvm::Type::getInt64Ty(CGF.getLLVMContext()));
    llvm::Value *Mask = llvm::ConstantInt::get(
        llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  // Round the type size up to the next 8-byte multiple when advancing.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
    llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
                           (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
1138
/// EmitVAArg - Emit the AMD64 va_arg sequence for a value of type \arg Ty:
/// branch at runtime on whether the argument was passed in registers
/// (fetching it from the register save area, assembling mixed-class values
/// in a temporary) or in memory (via the overflow area), and return the
/// address of the fetched value.
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::LLVMContext &VMContext = CGF.getLLVMContext();
  const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
  const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);

  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space).

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(i32Ty,
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(i32Ty,
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    // Need room in both register classes to take the register path.
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // Mixed GP/SSE value: copy both eightbytes into a temporary so the
    // result is contiguous in memory.
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    // Pick which save-area slot feeds each struct element based on which
    // element is the floating-point one.
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area, we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(i32Ty, 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(DoubleTy);
      const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
                                                         DoubleTy, NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  // Merge the register-path and memory-path addresses with a phi.
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}
1307
// PIC16 ABI Implementation

namespace {

/// PIC16ABIInfo - ABI information for the PIC16 target. Everything is
/// classified as direct (void returns are ignored); EmitVAArg emits no
/// code.
class PIC16ABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    // Classify the return type and each argument independently.
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

}
1335
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001336ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +00001337 ASTContext &Context,
1338 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001339 if (RetTy->isVoidType()) {
1340 return ABIArgInfo::getIgnore();
1341 } else {
1342 return ABIArgInfo::getDirect();
1343 }
1344}
1345
/// classifyArgumentType - On PIC16 every argument is passed directly.
ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context,
                                              llvm::LLVMContext &VMContext) const {
  return ABIArgInfo::getDirect();
}
1351
/// EmitVAArg - va_arg lowering is not implemented for PIC16; no IR is
/// emitted and a null address is returned.
// NOTE(review): presumably va_arg never reaches here for this target --
// confirm before relying on the null result.
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}
1356
// ARM ABI Implementation

namespace {

/// ARMABIInfo - ABI information for the ARM target, parameterized by which
/// variant of the procedure call standard is in effect.
class ARMABIInfo : public ABIInfo {
public:
  /// ABIKind - The ARM calling-convention variant to target.
  enum ABIKind {
    APCS = 0,       // Legacy APCS.
    AAPCS = 1,      // Base AAPCS.
    AAPCS_VFP       // AAPCS with VFP registers.
  };

private:
  // Which variant this instance targets; fixed at construction.
  ABIKind Kind;

public:
  ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMCOntext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

}
1394
Owen Anderson170229f2009-07-14 23:10:40 +00001395void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1396 llvm::LLVMContext &VMContext) const {
Mike Stump11289f42009-09-09 15:08:12 +00001397 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
Owen Anderson170229f2009-07-14 23:10:40 +00001398 VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001399 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1400 it != ie; ++it) {
Owen Anderson170229f2009-07-14 23:10:40 +00001401 it->info = classifyArgumentType(it->type, Context, VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001402 }
Daniel Dunbar020daa92009-09-12 01:00:39 +00001403
1404 // ARM always overrides the calling convention.
1405 switch (getABIKind()) {
1406 case APCS:
1407 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
1408 break;
1409
1410 case AAPCS:
1411 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
1412 break;
1413
1414 case AAPCS_VFP:
1415 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
1416 break;
1417 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001418}
1419
1420ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
Owen Anderson170229f2009-07-14 23:10:40 +00001421 ASTContext &Context,
1422 llvm::LLVMContext &VMContext) const {
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001423 if (!CodeGenFunction::hasAggregateLLVMType(Ty))
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001424 return (Ty->isPromotableIntegerType() ?
1425 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001426
Daniel Dunbar09d33622009-09-14 21:54:03 +00001427 // Ignore empty records.
1428 if (isEmptyRecord(Context, Ty, true))
1429 return ABIArgInfo::getIgnore();
1430
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001431 // FIXME: This is kind of nasty... but there isn't much choice because the ARM
1432 // backend doesn't support byval.
1433 // FIXME: This doesn't handle alignment > 64 bits.
1434 const llvm::Type* ElemTy;
1435 unsigned SizeRegs;
1436 if (Context.getTypeAlign(Ty) > 32) {
Owen Anderson41a75022009-08-13 21:57:51 +00001437 ElemTy = llvm::Type::getInt64Ty(VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001438 SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
1439 } else {
Owen Anderson41a75022009-08-13 21:57:51 +00001440 ElemTy = llvm::Type::getInt32Ty(VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001441 SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
1442 }
1443 std::vector<const llvm::Type*> LLVMFields;
Owen Anderson9793f0e2009-07-29 22:16:19 +00001444 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
Owen Anderson758428f2009-08-05 23:18:46 +00001445 const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001446 return ABIArgInfo::getCoerce(STy);
1447}
1448
/// isIntegerLikeType - Return true if Ty is an APCS "integer-like" type: one
/// that fits in a single 32-bit word with every addressable sub-field at
/// offset zero, and so may be returned in r0. Note: VMContext is threaded
/// through for the recursive call but is otherwise unused here.
static bool isIntegerLikeType(QualType Ty,
                              ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAsBuiltinType() || Ty->isPointerType())
    return true;

  // Complex types "should" be ok by the definition above, but they are not.
  if (Ty->isAnyComplexType())
    return false;

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  // HadField tracks whether a non-union record has already contributed a
  // field; idx tracks the field index for the layout query.
  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Check if this field is at offset 0.
    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset != 0) {
      // Allow padding bit-fields, but only if they are all at the end of the
      // structure (despite the wording above, this matches gcc).
      if (FD->isBitField() &&
          !FD->getBitWidth()->EvaluateAsInt(Context).getZExtValue()) {
        // Scan the remaining fields; any non-bit-field or non-zero-width
        // bit-field disqualifies the record.
        for (; i != e; ++i)
          if (!i->isBitField() ||
              i->getBitWidth()->EvaluateAsInt(Context).getZExtValue())
            return false;

        // All remaining fields are padding, allow this.
        return true;
      }

      return false;
    }

    // Each member must itself be integer-like (recursive check).
    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. Again this doesn't match the
    // wording above, but follows gcc. Unions are exempt: all their members
    // share offset 0.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
1534
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001535ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +00001536 ASTContext &Context,
1537 llvm::LLVMContext &VMContext) const {
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001538 if (RetTy->isVoidType())
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001539 return ABIArgInfo::getIgnore();
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001540
1541 if (!CodeGenFunction::hasAggregateLLVMType(RetTy))
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001542 return (RetTy->isPromotableIntegerType() ?
1543 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001544
1545 // Are we following APCS?
1546 if (getABIKind() == APCS) {
1547 if (isEmptyRecord(Context, RetTy, false))
1548 return ABIArgInfo::getIgnore();
1549
1550 // Integer like structures are returned in r0.
1551 if (isIntegerLikeType(RetTy, Context, VMContext)) {
1552 // Return in the smallest viable integer type.
1553 uint64_t Size = Context.getTypeSize(RetTy);
1554 if (Size <= 8)
1555 return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
1556 if (Size <= 16)
1557 return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
1558 return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
1559 }
1560
1561 // Otherwise return in memory.
1562 return ABIArgInfo::getIndirect(0);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001563 }
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001564
1565 // Otherwise this is an AAPCS variant.
1566
Daniel Dunbar1ce72512009-09-14 00:56:55 +00001567 if (isEmptyRecord(Context, RetTy, true))
1568 return ABIArgInfo::getIgnore();
1569
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001570 // Aggregates <= 4 bytes are returned in r0; other aggregates
1571 // are returned indirectly.
1572 uint64_t Size = Context.getTypeSize(RetTy);
Daniel Dunbar1ce72512009-09-14 00:56:55 +00001573 if (Size <= 32) {
1574 // Return in the smallest viable integer type.
1575 if (Size <= 8)
1576 return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
1577 if (Size <= 16)
1578 return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001579 return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
Daniel Dunbar1ce72512009-09-14 00:56:55 +00001580 }
1581
Daniel Dunbar626f1d82009-09-13 08:03:58 +00001582 return ABIArgInfo::getIndirect(0);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001583}
1584
1585llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1586 CodeGenFunction &CGF) const {
1587 // FIXME: Need to handle alignment
Mike Stump11289f42009-09-09 15:08:12 +00001588 const llvm::Type *BP =
Owen Anderson41a75022009-08-13 21:57:51 +00001589 llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
Owen Anderson9793f0e2009-07-29 22:16:19 +00001590 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001591
1592 CGBuilderTy &Builder = CGF.Builder;
1593 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
1594 "ap");
1595 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
1596 llvm::Type *PTy =
Owen Anderson9793f0e2009-07-29 22:16:19 +00001597 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001598 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
1599
1600 uint64_t Offset =
1601 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
1602 llvm::Value *NextAddr =
Owen Anderson41a75022009-08-13 21:57:51 +00001603 Builder.CreateGEP(Addr, llvm::ConstantInt::get(
1604 llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001605 "ap.next");
1606 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
1607
1608 return AddrTyped;
1609}
1610
1611ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +00001612 ASTContext &Context,
1613 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001614 if (RetTy->isVoidType()) {
1615 return ABIArgInfo::getIgnore();
1616 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1617 return ABIArgInfo::getIndirect(0);
1618 } else {
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001619 return (RetTy->isPromotableIntegerType() ?
1620 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001621 }
1622}
1623
Daniel Dunbard59655c2009-09-12 00:59:49 +00001624// SystemZ ABI Implementation
1625
namespace {

/// SystemZABIInfo - ABI classification for SystemZ: aggregates are passed and
/// returned indirectly, and sub-64-bit integer scalars are extended (see
/// isPromotableIntegerType below).
class SystemZABIInfo : public ABIInfo {
  // Returns true for the 8/16/32-bit builtin integer types that the SystemZ
  // ABI requires to be extended to a full register.
  bool isPromotableIntegerType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and then every argument of the call/function.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                            Context, VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

}
1651
1652bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
1653 // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
1654 if (const BuiltinType *BT = Ty->getAsBuiltinType())
1655 switch (BT->getKind()) {
1656 case BuiltinType::Bool:
1657 case BuiltinType::Char_S:
1658 case BuiltinType::Char_U:
1659 case BuiltinType::SChar:
1660 case BuiltinType::UChar:
1661 case BuiltinType::Short:
1662 case BuiltinType::UShort:
1663 case BuiltinType::Int:
1664 case BuiltinType::UInt:
1665 return true;
1666 default:
1667 return false;
1668 }
1669 return false;
1670}
1671
/// EmitVAArg - va_arg lowering is not implemented for SystemZ yet; this stub
/// simply returns a null Value*.
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}
1677
1678
1679ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
1680 ASTContext &Context,
Daniel Dunbard59655c2009-09-12 00:59:49 +00001681 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovb5b703b2009-07-16 20:09:57 +00001682 if (RetTy->isVoidType()) {
1683 return ABIArgInfo::getIgnore();
1684 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1685 return ABIArgInfo::getIndirect(0);
1686 } else {
1687 return (isPromotableIntegerType(RetTy) ?
1688 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1689 }
1690}
1691
1692ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
1693 ASTContext &Context,
Daniel Dunbard59655c2009-09-12 00:59:49 +00001694 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovb5b703b2009-07-16 20:09:57 +00001695 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
1696 return ABIArgInfo::getIndirect(0);
1697 } else {
1698 return (isPromotableIntegerType(Ty) ?
1699 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1700 }
1701}
1702
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001703ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
Owen Anderson170229f2009-07-14 23:10:40 +00001704 ASTContext &Context,
1705 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001706 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
1707 return ABIArgInfo::getIndirect(0);
1708 } else {
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001709 return (Ty->isPromotableIntegerType() ?
1710 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001711 }
1712}
1713
/// EmitVAArg - The default ABI provides no va_arg lowering; this stub returns
/// a null Value*.
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}
1718
1719const ABIInfo &CodeGenTypes::getABIInfo() const {
1720 if (TheABIInfo)
1721 return *TheABIInfo;
1722
Daniel Dunbare3532f82009-08-24 08:52:16 +00001723 // For now we just cache the ABIInfo in CodeGenTypes and don't free it.
1724
Daniel Dunbar40165182009-08-24 09:10:05 +00001725 const llvm::Triple &Triple(getContext().Target.getTriple());
1726 switch (Triple.getArch()) {
Daniel Dunbare3532f82009-08-24 08:52:16 +00001727 default:
1728 return *(TheABIInfo = new DefaultABIInfo);
1729
Daniel Dunbard59655c2009-09-12 00:59:49 +00001730 case llvm::Triple::arm:
1731 case llvm::Triple::thumb:
Daniel Dunbar020daa92009-09-12 01:00:39 +00001732 // FIXME: We want to know the float calling convention as well.
Daniel Dunbarb4091a92009-09-14 00:35:03 +00001733 if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
Daniel Dunbar020daa92009-09-12 01:00:39 +00001734 return *(TheABIInfo = new ARMABIInfo(ARMABIInfo::APCS));
1735
1736 return *(TheABIInfo = new ARMABIInfo(ARMABIInfo::AAPCS));
Daniel Dunbard59655c2009-09-12 00:59:49 +00001737
1738 case llvm::Triple::pic16:
1739 return *(TheABIInfo = new PIC16ABIInfo());
1740
1741 case llvm::Triple::systemz:
1742 return *(TheABIInfo = new SystemZABIInfo());
1743
Daniel Dunbar40165182009-08-24 09:10:05 +00001744 case llvm::Triple::x86:
1745 if (Triple.getOS() == llvm::Triple::Darwin)
Daniel Dunbare3532f82009-08-24 08:52:16 +00001746 return *(TheABIInfo = new X86_32ABIInfo(Context, true, true));
1747
Daniel Dunbar40165182009-08-24 09:10:05 +00001748 switch (Triple.getOS()) {
Daniel Dunbare3532f82009-08-24 08:52:16 +00001749 case llvm::Triple::Cygwin:
1750 case llvm::Triple::DragonFly:
1751 case llvm::Triple::MinGW32:
1752 case llvm::Triple::MinGW64:
David Chisnall2c5bef22009-09-03 01:48:05 +00001753 case llvm::Triple::FreeBSD:
Daniel Dunbare3532f82009-08-24 08:52:16 +00001754 case llvm::Triple::OpenBSD:
1755 return *(TheABIInfo = new X86_32ABIInfo(Context, false, true));
1756
1757 default:
1758 return *(TheABIInfo = new X86_32ABIInfo(Context, false, false));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001759 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001760
Daniel Dunbare3532f82009-08-24 08:52:16 +00001761 case llvm::Triple::x86_64:
1762 return *(TheABIInfo = new X86_64ABIInfo());
Daniel Dunbare3532f82009-08-24 08:52:16 +00001763 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001764}