blob: 96c70feafe94f2034e57201321a9d2f7643194f6 [file] [log] [blame]
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001//===---- TargetABIInfo.cpp - Encapsulate target ABI details ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "ABIInfo.h"
16#include "CodeGenFunction.h"
Anders Carlsson19cc4ab2009-07-18 19:43:29 +000017#include "clang/AST/RecordLayout.h"
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000018#include "llvm/Type.h"
Daniel Dunbar2c0843f2009-08-24 08:52:16 +000019#include "llvm/ADT/Triple.h"
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000020
21using namespace clang;
22using namespace CodeGen;
23
// Out-of-line definition of the (empty) virtual destructor for the abstract
// ABIInfo base class.
ABIInfo::~ABIInfo() {}
25
26void ABIArgInfo::dump() const {
27 fprintf(stderr, "(ABIArgInfo Kind=");
28 switch (TheKind) {
29 case Direct:
30 fprintf(stderr, "Direct");
31 break;
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +000032 case Extend:
33 fprintf(stderr, "Extend");
34 break;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000035 case Ignore:
36 fprintf(stderr, "Ignore");
37 break;
38 case Coerce:
39 fprintf(stderr, "Coerce Type=");
40 getCoerceToType()->print(llvm::errs());
41 break;
42 case Indirect:
43 fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
44 break;
45 case Expand:
46 fprintf(stderr, "Expand");
47 break;
48 }
49 fprintf(stderr, ")\n");
50}
51
52static bool isEmptyRecord(ASTContext &Context, QualType T);
53
54/// isEmptyField - Return true iff a the field is "empty", that is it
55/// is an unnamed bit-field or an (array of) empty record(s).
56static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
57 if (FD->isUnnamedBitfield())
58 return true;
59
60 QualType FT = FD->getType();
61 // Constant arrays of empty records count as empty, strip them off.
62 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
63 FT = AT->getElementType();
64
65 return isEmptyRecord(Context, FT);
66}
67
68/// isEmptyRecord - Return true iff a structure contains only empty
69/// fields. Note that a structure with a flexible array member is not
70/// considered empty.
71static bool isEmptyRecord(ASTContext &Context, QualType T) {
Ted Kremenek6217b802009-07-29 21:53:49 +000072 const RecordType *RT = T->getAs<RecordType>();
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000073 if (!RT)
74 return 0;
75 const RecordDecl *RD = RT->getDecl();
76 if (RD->hasFlexibleArrayMember())
77 return false;
Argyrios Kyrtzidis17945a02009-06-30 02:36:12 +000078 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
79 i != e; ++i)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000080 if (!isEmptyField(Context, *i))
81 return false;
82 return true;
83}
84
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists; null otherwise.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  // Only structure types qualify (getAsStructureType excludes unions).
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  // Found accumulates the single candidate element type; a second non-empty
  // field disqualifies the struct below.
  const Type *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    // Scalars are accepted directly; aggregates must themselves reduce to a
    // single element (recursively).
    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
135
136static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
137 if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
138 return false;
139
140 uint64_t Size = Context.getTypeSize(Ty);
141 return Size == 32 || Size == 64;
142}
143
144static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
145 ASTContext &Context) {
Argyrios Kyrtzidis17945a02009-06-30 02:36:12 +0000146 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
147 i != e; ++i) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000148 const FieldDecl *FD = *i;
149
150 if (!is32Or64BitBasicType(FD->getType(), Context))
151 return false;
152
153 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
154 // how to expand them yet, and the predicate for telling if a bitfield still
155 // counts as "basic" is more complicated than what we were doing previously.
156 if (FD->isBitField())
157 return false;
158 }
159
160 return true;
161}
162
Eli Friedmana1e6de92009-06-13 21:37:10 +0000163static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
Argyrios Kyrtzidis17945a02009-06-30 02:36:12 +0000164 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
165 i != e; ++i) {
Eli Friedmana1e6de92009-06-13 21:37:10 +0000166 const FieldDecl *FD = *i;
167
168 if (FD->getType()->isVectorType() &&
169 Context.getTypeSize(FD->getType()) >= 128)
170 return true;
171
Ted Kremenek6217b802009-07-29 21:53:49 +0000172 if (const RecordType* RT = FD->getType()->getAs<RecordType>())
Eli Friedmana1e6de92009-06-13 21:37:10 +0000173 if (typeContainsSSEVector(RT->getDecl(), Context))
174 return true;
175 }
176
177 return false;
178}
179
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and each argument type independently.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  // When set, small vectors may be returned in registers (see
  // classifyReturnType's Darwin handling).
  bool IsDarwinVectorABI;
  // When clear, struct/union returns are always passed indirectly.
  bool IsSmallStructInRegABI;

  /// isRegisterSize - True for sizes that fit a general purpose register
  /// (8, 16, 32, or 64 bits).
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  static unsigned getIndirectArgumentAlignment(QualType Ty,
                                               ASTContext &Context);

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and each argument type independently.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  // \param d - enables the Darwin vector ABI (IsDarwinVectorABI).
  // \param p - enables returning small structs in registers
  //            (IsSmallStructInRegABI).
  X86_32ABIInfo(ASTContext &Context, bool d, bool p)
    : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
      IsSmallStructInRegABI(p) {}
};
}
248
249
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized (8, 16, 32, or 64 bits).
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, or complex type, it is ok.
  if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}
298
/// classifyReturnType - Decide how a value of type RetTy is returned under
/// the x86-32 ABI: ignored (void), returned directly, coerced into a
/// register-sized type, or returned indirectly through memory.
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context,
                                             llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAsVectorType()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(VMContext), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));

      // Everything else goes through memory.
      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = RetTy->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure, padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(
            llvm::IntegerType::get(VMContext, (unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        // Otherwise classify the element type as if returned directly.
        return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
      uint64_t Size = Context.getTypeSize(RetTy);
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    // Scalar return: promotable integers are sign/zero extended, everything
    // else is returned directly.
    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}
385
Eli Friedmana1e6de92009-06-13 21:37:10 +0000386unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
387 ASTContext &Context) {
388 unsigned Align = Context.getTypeAlign(Ty);
389 if (Align < 128) return 0;
Ted Kremenek6217b802009-07-29 21:53:49 +0000390 if (const RecordType* RT = Ty->getAs<RecordType>())
Eli Friedmana1e6de92009-06-13 21:37:10 +0000391 if (typeContainsSSEVector(RT->getDecl(), Context))
392 return 16;
393 return 0;
394}
395
/// classifyArgumentType - Decide how an argument of type Ty is passed under
/// the x86-32 ABI: ignored, expanded into scalar components, passed
/// indirectly through memory, or passed directly (with integer promotion).
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context,
                                               llvm::LLVMContext &VMContext) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
                                                                    Context));

    // Ignore empty structs.
    if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    // Expand structs with size <= 128-bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Context.getTypeSize(Ty) <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    // All other aggregates go through memory.
    return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
  } else {
    // Scalar argument: promotable integers are sign/zero extended,
    // everything else is passed directly.
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}
426
/// EmitVAArg - Emit IR to fetch the next va_arg value of type Ty.  On x86-32
/// the va_list is treated as a simple byte pointer: load the current pointer,
/// cast it to Ty*, then store back the pointer advanced by Ty's size rounded
/// up to 4 bytes.
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  // Load the current argument pointer out of the va_list.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  // Reinterpret the raw pointer as a pointer to the requested type.
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance by the type's size rounded up to the 4-byte slot size, and store
  // the new position back into the va_list.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
                      llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
450
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  /// Class - The register classes of the AMD64 ABI classification algorithm
  /// (see the AMD64-ABI references in classify/merge below).
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, chose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
  /// the ways we might want to pass things, instead of constructing an LLVM
  /// type. This makes this code more explicit, and it makes it clearer that we
  /// are also doing this for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty,
                               ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  // NOTE(review): neededInt/neededSSE presumably report the number of GP and
  // SSE registers the argument consumes — confirm against computeInfo (its
  // definition is outside this chunk).
  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}
536
537X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
538 Class Field) const {
539 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
540 // classified recursively so that always two fields are
541 // considered. The resulting class is calculated according to
542 // the classes of the fields in the eightbyte:
543 //
544 // (a) If both classes are equal, this is the resulting class.
545 //
546 // (b) If one of the classes is NO_CLASS, the resulting class is
547 // the other class.
548 //
549 // (c) If one of the classes is MEMORY, the result is the MEMORY
550 // class.
551 //
552 // (d) If one of the classes is INTEGER, the result is the
553 // INTEGER.
554 //
555 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
556 // MEMORY is used as class.
557 //
558 // (f) Otherwise class SSE is used.
559
560 // Accum should never be memory (we should have returned) or
561 // ComplexX87 (because this cannot be passed in a structure).
562 assert((Accum != Memory && Accum != ComplexX87) &&
563 "Invalid accumulated classification during merge.");
564 if (Accum == Field || Field == NoClass)
565 return Accum;
566 else if (Field == Memory)
567 return Memory;
568 else if (Accum == NoClass)
569 return Field;
570 else if (Accum == Integer || Field == Integer)
571 return Integer;
572 else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
573 Accum == X87 || Accum == X87Up)
574 return Memory;
575 else
576 return SSE;
577}
578
/// classify - Compute the (Lo, Hi) eightbyte register classes for Ty at bit
/// offset OffsetBase within its containing object, per the AMD64 ABI
/// classification algorithm.  See the declaration comment for the meaning of
/// Lo/Hi/OffsetBase.
void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class
  // for Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  // "Current" aliases whichever eightbyte this type starts in; Memory is the
  // conservative default for any type not handled below.
  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      // 128-bit integers occupy both eightbytes.
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
  } else if (const EnumType *ET = Ty->getAsEnumType()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    // Classify each element at its offset and merge into the accumulated
    // (Lo, Hi) pair; stop early once either half degrades to Memory.
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

        // Classify by which eightbyte(s) the bit range covers.
        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceeded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
797
798ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
799 const llvm::Type *CoerceTo,
800 ASTContext &Context) const {
Owen Anderson0032b272009-08-13 21:57:51 +0000801 if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000802 // Integer and pointer types will end up in a general purpose
803 // register.
804 if (Ty->isIntegralType() || Ty->isPointerType())
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +0000805 return (Ty->isPromotableIntegerType() ?
806 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Owen Anderson0032b272009-08-13 21:57:51 +0000807 } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000808 // FIXME: It would probably be better to make CGFunctionInfo only map using
809 // canonical types than to canonize here.
810 QualType CTy = Context.getCanonicalType(Ty);
811
812 // Float and double end up in a single SSE reg.
813 if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
814 return ABIArgInfo::getDirect();
815
816 }
817
818 return ABIArgInfo::getCoerce(CoerceTo);
819}
820
821ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
822 ASTContext &Context) const {
823 // If this is a scalar LLVM value then assume LLVM will pass it in the right
824 // place naturally.
825 if (!CodeGenFunction::hasAggregateLLVMType(Ty))
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +0000826 return (Ty->isPromotableIntegerType() ?
827 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000828
829 // FIXME: Set alignment correctly.
830 return ABIArgInfo::getIndirect(0);
831}
832
/// classifyReturnType - Decide how a value of type \p RetTy is returned on
/// x86-64: ignored, returned indirectly via a hidden sret pointer, or
/// coerced to an LLVM type assembled from the {Lo, Hi} eightbyte classes.
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context,
                                             llvm::LLVMContext &VMContext) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  // ResType accumulates the LLVM type for the low eightbyte, then is
  // widened/paired below according to the high eightbyte's class.
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectResult(RetTy, Context);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::getInt64Ty(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::getDoubleTy(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(VMContext); break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext),
                                    llvm::Type::getX86_FP80Ty(VMContext),
                                    NULL);
    break;
  }

  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used SSE register.
    //
    // SSEUP should always be preceeded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceeded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceeded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(VMContext, ResType,
                                      llvm::Type::getDoubleTy(VMContext), NULL);
    break;
  }

  // Simplify the coercion back to Direct/Extend when the chosen LLVM type
  // matches what the source type would use anyway.
  return getCoerceResult(RetTy, ResType, Context);
}
929
/// classifyArgumentType - Decide how an argument of type \p Ty is passed on
/// x86-64, reporting through \p neededInt / \p neededSSE how many general
/// purpose and SSE registers the chosen representation consumes (both are
/// zero when the argument goes in memory).
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               llvm::LLVMContext &VMContext,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  // ResType accumulates the LLVM type for the low eightbyte, then is
  // paired with a second member below according to the high eightbyte.
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return getIndirectResult(Ty, Context);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::getInt64Ty(VMContext);
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::getDoubleTy(VMContext);
    break;
  }

  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceed by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    ++neededInt;
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;
  }

  // Simplify the coercion back to Direct/Extend when the chosen LLVM type
  // matches what the source type would use anyway.
  return getCoerceResult(Ty, ResType, Context);
}
1018
Owen Andersona1cf15f2009-07-14 23:10:40 +00001019void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1020 llvm::LLVMContext &VMContext) const {
1021 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
1022 Context, VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001023
1024 // Keep track of the number of assigned registers.
1025 unsigned freeIntRegs = 6, freeSSERegs = 8;
1026
1027 // If the return value is indirect, then the hidden argument is consuming one
1028 // integer register.
1029 if (FI.getReturnInfo().isIndirect())
1030 --freeIntRegs;
1031
1032 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1033 // get assigned (in left-to-right order) for passing as follows...
1034 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1035 it != ie; ++it) {
1036 unsigned neededInt, neededSSE;
Owen Andersona1cf15f2009-07-14 23:10:40 +00001037 it->info = classifyArgumentType(it->type, Context, VMContext,
1038 neededInt, neededSSE);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001039
1040 // AMD64-ABI 3.2.3p3: If there are no registers available for any
1041 // eightbyte of an argument, the whole argument is passed on the
1042 // stack. If registers have already been assigned for some
1043 // eightbytes of such an argument, the assignments get reverted.
1044 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
1045 freeIntRegs -= neededInt;
1046 freeSSERegs -= neededSSE;
1047 } else {
1048 it->info = getIndirectResult(it->type, Context);
1049 }
1050 }
1051}
1052
/// EmitVAArgFromMemory - Emit the IR that fetches a va_arg value of type
/// \p Ty from the overflow (stack) area of an x86-64 va_list, and advances
/// l->overflow_arg_area past the consumed, 8-byte aligned slot.  Returns a
/// pointer to the fetched value.
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  // overflow_arg_area is the third field of the va_list struct.
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    // Round down via ptrtoint / and / inttoptr since GEP cannot mask bits.
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                 llvm::Type::getInt64Ty(CGF.getLLVMContext()));
    llvm::Value *Mask = llvm::ConstantInt::get(
        llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
    llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
                           (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
1105
/// EmitVAArg - Emit the full x86-64 va_arg sequence: decide at runtime
/// whether the value is still in the register save area or already spilled
/// to the overflow area, load it from the right place, and bump the
/// corresponding va_list offsets.  Returns a pointer to the value.
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::LLVMContext &VMContext = CGF.getLLVMContext();

  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space).

  // InRegs is the runtime i1 "all needed registers are still available".
  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // Mixed GPR/SSE argument: gather both eightbytes into a temporary
    // struct so the caller sees one contiguous value.
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    // Single GPR eightbyte: read it in place from the save area.
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area, we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                            llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(llvm::Type::getDoubleTy(VMContext));
      const llvm::StructType *ST = llvm::StructType::get(VMContext,
                                                         llvm::Type::getDoubleTy(VMContext),
                                                         llvm::Type::getDoubleTy(VMContext),
                                                         NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                                 neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                                 neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  // Merge the register-path and memory-path addresses with a phi.
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}
1276
1277// ABI Info for PIC16
1278class PIC16ABIInfo : public ABIInfo {
1279 ABIArgInfo classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001280 ASTContext &Context,
1281 llvm::LLVMContext &VMContext) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001282
1283 ABIArgInfo classifyArgumentType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001284 ASTContext &Context,
1285 llvm::LLVMContext &VMContext) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001286
Owen Andersona1cf15f2009-07-14 23:10:40 +00001287 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1288 llvm::LLVMContext &VMContext) const {
1289 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
1290 VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001291 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1292 it != ie; ++it)
Owen Andersona1cf15f2009-07-14 23:10:40 +00001293 it->info = classifyArgumentType(it->type, Context, VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001294 }
1295
1296 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1297 CodeGenFunction &CGF) const;
1298
1299};
1300
1301ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001302 ASTContext &Context,
1303 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001304 if (RetTy->isVoidType()) {
1305 return ABIArgInfo::getIgnore();
1306 } else {
1307 return ABIArgInfo::getDirect();
1308 }
1309}
1310
/// classifyArgumentType - Every PIC16 argument is passed directly; no
/// per-type classification is performed.
ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context,
                                              llvm::LLVMContext &VMContext) const {
  return ABIArgInfo::getDirect();
}
1316
/// EmitVAArg - va_arg lowering is not implemented for PIC16; returns a null
/// Value, which callers must tolerate.
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}
1321
1322class ARMABIInfo : public ABIInfo {
1323 ABIArgInfo classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001324 ASTContext &Context,
1325 llvm::LLVMContext &VMCOntext) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001326
1327 ABIArgInfo classifyArgumentType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001328 ASTContext &Context,
1329 llvm::LLVMContext &VMContext) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001330
Owen Andersona1cf15f2009-07-14 23:10:40 +00001331 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1332 llvm::LLVMContext &VMContext) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001333
1334 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1335 CodeGenFunction &CGF) const;
1336};
1337
Owen Andersona1cf15f2009-07-14 23:10:40 +00001338void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1339 llvm::LLVMContext &VMContext) const {
1340 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
1341 VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001342 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1343 it != ie; ++it) {
Owen Andersona1cf15f2009-07-14 23:10:40 +00001344 it->info = classifyArgumentType(it->type, Context, VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001345 }
1346}
1347
1348ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001349 ASTContext &Context,
1350 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001351 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001352 return (Ty->isPromotableIntegerType() ?
1353 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001354 }
1355 // FIXME: This is kind of nasty... but there isn't much choice because the ARM
1356 // backend doesn't support byval.
1357 // FIXME: This doesn't handle alignment > 64 bits.
1358 const llvm::Type* ElemTy;
1359 unsigned SizeRegs;
1360 if (Context.getTypeAlign(Ty) > 32) {
Owen Anderson0032b272009-08-13 21:57:51 +00001361 ElemTy = llvm::Type::getInt64Ty(VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001362 SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
1363 } else {
Owen Anderson0032b272009-08-13 21:57:51 +00001364 ElemTy = llvm::Type::getInt32Ty(VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001365 SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
1366 }
1367 std::vector<const llvm::Type*> LLVMFields;
Owen Anderson96e0fc72009-07-29 22:16:19 +00001368 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
Owen Anderson47a434f2009-08-05 23:18:46 +00001369 const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001370 return ABIArgInfo::getCoerce(STy);
1371}
1372
1373ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001374 ASTContext &Context,
1375 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001376 if (RetTy->isVoidType()) {
1377 return ABIArgInfo::getIgnore();
1378 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1379 // Aggregates <= 4 bytes are returned in r0; other aggregates
1380 // are returned indirectly.
1381 uint64_t Size = Context.getTypeSize(RetTy);
1382 if (Size <= 32)
Owen Anderson0032b272009-08-13 21:57:51 +00001383 return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001384 return ABIArgInfo::getIndirect(0);
1385 } else {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001386 return (RetTy->isPromotableIntegerType() ?
1387 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001388 }
1389}
1390
/// EmitVAArg - Lower va_arg for ARM: treat the va_list as a raw i8* cursor,
/// return the current address cast to a pointer to \p Ty, and advance the
/// cursor by the size of \p Ty rounded up to 4 bytes.
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP =
      llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance the cursor past this argument's 4-byte-rounded slot.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
1416
1417ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001418 ASTContext &Context,
1419 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001420 if (RetTy->isVoidType()) {
1421 return ABIArgInfo::getIgnore();
1422 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1423 return ABIArgInfo::getIndirect(0);
1424 } else {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001425 return (RetTy->isPromotableIntegerType() ?
1426 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001427 }
1428}
1429
Anton Korobeynikov89e887f2009-07-16 20:09:57 +00001430namespace {
1431class SystemZABIInfo : public ABIInfo {
1432 bool isPromotableIntegerType(QualType Ty) const;
1433
1434 ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
1435 llvm::LLVMContext &VMContext) const;
1436
1437 ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
1438 llvm::LLVMContext &VMContext) const;
1439
1440 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1441 llvm::LLVMContext &VMContext) const {
1442 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
1443 Context, VMContext);
1444 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1445 it != ie; ++it)
1446 it->info = classifyArgumentType(it->type, Context, VMContext);
1447 }
1448
1449 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1450 CodeGenFunction &CGF) const;
1451};
1452}
1453
1454bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
1455 // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
1456 if (const BuiltinType *BT = Ty->getAsBuiltinType())
1457 switch (BT->getKind()) {
1458 case BuiltinType::Bool:
1459 case BuiltinType::Char_S:
1460 case BuiltinType::Char_U:
1461 case BuiltinType::SChar:
1462 case BuiltinType::UChar:
1463 case BuiltinType::Short:
1464 case BuiltinType::UShort:
1465 case BuiltinType::Int:
1466 case BuiltinType::UInt:
1467 return true;
1468 default:
1469 return false;
1470 }
1471 return false;
1472}
1473
// EmitVAArg - Lower a va_arg of type Ty read from the va_list at
// VAListAddr.  Not yet implemented for SystemZ: returns a null Value.
// NOTE(review): callers presumably treat the null result as "unsupported"
// — confirm against the CodeGenFunction va_arg emission path.
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}
1479
1480
1481ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
1482 ASTContext &Context,
1483 llvm::LLVMContext &VMContext) const {
1484 if (RetTy->isVoidType()) {
1485 return ABIArgInfo::getIgnore();
1486 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1487 return ABIArgInfo::getIndirect(0);
1488 } else {
1489 return (isPromotableIntegerType(RetTy) ?
1490 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1491 }
1492}
1493
1494ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
1495 ASTContext &Context,
1496 llvm::LLVMContext &VMContext) const {
1497 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
1498 return ABIArgInfo::getIndirect(0);
1499 } else {
1500 return (isPromotableIntegerType(Ty) ?
1501 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1502 }
1503}
1504
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001505ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001506 ASTContext &Context,
1507 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001508 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
1509 return ABIArgInfo::getIndirect(0);
1510 } else {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001511 return (Ty->isPromotableIntegerType() ?
1512 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001513 }
1514}
1515
// EmitVAArg - Default va_arg lowering; unimplemented, returns a null
// Value.  NOTE(review): unlike SystemZABIInfo::EmitVAArg this carries no
// FIXME, but it is equally a stub — callers presumably handle the null
// result; confirm against the CodeGenFunction va_arg emission path.
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}
1520
1521const ABIInfo &CodeGenTypes::getABIInfo() const {
1522 if (TheABIInfo)
1523 return *TheABIInfo;
1524
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001525 // For now we just cache the ABIInfo in CodeGenTypes and don't free it.
1526
Daniel Dunbar1752ee42009-08-24 09:10:05 +00001527 const llvm::Triple &Triple(getContext().Target.getTriple());
1528 switch (Triple.getArch()) {
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001529 default:
1530 return *(TheABIInfo = new DefaultABIInfo);
1531
Daniel Dunbar1752ee42009-08-24 09:10:05 +00001532 case llvm::Triple::x86:
1533 if (Triple.getOS() == llvm::Triple::Darwin)
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001534 return *(TheABIInfo = new X86_32ABIInfo(Context, true, true));
1535
Daniel Dunbar1752ee42009-08-24 09:10:05 +00001536 switch (Triple.getOS()) {
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001537 case llvm::Triple::Cygwin:
1538 case llvm::Triple::DragonFly:
1539 case llvm::Triple::MinGW32:
1540 case llvm::Triple::MinGW64:
1541 case llvm::Triple::NetBSD:
1542 case llvm::Triple::OpenBSD:
1543 return *(TheABIInfo = new X86_32ABIInfo(Context, false, true));
1544
1545 default:
1546 return *(TheABIInfo = new X86_32ABIInfo(Context, false, false));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001547 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001548
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001549 case llvm::Triple::x86_64:
1550 return *(TheABIInfo = new X86_64ABIInfo());
1551
1552 case llvm::Triple::arm:
1553 // FIXME: Support for OABI?
1554 return *(TheABIInfo = new ARMABIInfo());
1555
1556 case llvm::Triple::pic16:
1557 return *(TheABIInfo = new PIC16ABIInfo());
1558
1559 case llvm::Triple::systemz:
1560 return *(TheABIInfo = new SystemZABIInfo());
1561 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001562}