blob: 0ca2bb29b95c253e236c9ba00e534b6d19d21efc [file] [log] [blame]
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001//===---- TargetABIInfo.cpp - Encapsulate target ABI details ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "ABIInfo.h"
16#include "CodeGenFunction.h"
Anders Carlsson19cc4ab2009-07-18 19:43:29 +000017#include "clang/AST/RecordLayout.h"
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000018#include "llvm/Type.h"
Daniel Dunbar2c0843f2009-08-24 08:52:16 +000019#include "llvm/ADT/Triple.h"
Torok Edwinf42e4a62009-08-24 13:25:12 +000020#include <cstdio>
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000021
22using namespace clang;
23using namespace CodeGen;
24
25ABIInfo::~ABIInfo() {}
26
27void ABIArgInfo::dump() const {
28 fprintf(stderr, "(ABIArgInfo Kind=");
29 switch (TheKind) {
30 case Direct:
31 fprintf(stderr, "Direct");
32 break;
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +000033 case Extend:
34 fprintf(stderr, "Extend");
35 break;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000036 case Ignore:
37 fprintf(stderr, "Ignore");
38 break;
39 case Coerce:
40 fprintf(stderr, "Coerce Type=");
41 getCoerceToType()->print(llvm::errs());
42 break;
43 case Indirect:
44 fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
45 break;
46 case Expand:
47 fprintf(stderr, "Expand");
48 break;
49 }
50 fprintf(stderr, ")\n");
51}
52
53static bool isEmptyRecord(ASTContext &Context, QualType T);
54
55/// isEmptyField - Return true iff a the field is "empty", that is it
56/// is an unnamed bit-field or an (array of) empty record(s).
57static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
58 if (FD->isUnnamedBitfield())
59 return true;
60
61 QualType FT = FD->getType();
62 // Constant arrays of empty records count as empty, strip them off.
63 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
64 FT = AT->getElementType();
65
66 return isEmptyRecord(Context, FT);
67}
68
69/// isEmptyRecord - Return true iff a structure contains only empty
70/// fields. Note that a structure with a flexible array member is not
71/// considered empty.
72static bool isEmptyRecord(ASTContext &Context, QualType T) {
Ted Kremenek6217b802009-07-29 21:53:49 +000073 const RecordType *RT = T->getAs<RecordType>();
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000074 if (!RT)
75 return 0;
76 const RecordDecl *RD = RT->getDecl();
77 if (RD->hasFlexibleArrayMember())
78 return false;
Argyrios Kyrtzidis17945a02009-06-30 02:36:12 +000079 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
80 i != e; ++i)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +000081 if (!isEmptyField(Context, *i))
82 return false;
83 return true;
84}
85
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field if it exists, null
/// otherwise. (Note: despite the older comment, this returns a Type*,
/// not a field declaration.)
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Scalar field: this is our candidate single element.
      Found = FT.getTypePtr();
    } else {
      // Aggregate field: it must itself reduce to a single element.
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
136
137static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
138 if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
139 return false;
140
141 uint64_t Size = Context.getTypeSize(Ty);
142 return Size == 32 || Size == 64;
143}
144
145static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
146 ASTContext &Context) {
Argyrios Kyrtzidis17945a02009-06-30 02:36:12 +0000147 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
148 i != e; ++i) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000149 const FieldDecl *FD = *i;
150
151 if (!is32Or64BitBasicType(FD->getType(), Context))
152 return false;
153
154 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
155 // how to expand them yet, and the predicate for telling if a bitfield still
156 // counts as "basic" is more complicated than what we were doing previously.
157 if (FD->isBitField())
158 return false;
159 }
160
161 return true;
162}
163
Eli Friedmana1e6de92009-06-13 21:37:10 +0000164static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
Argyrios Kyrtzidis17945a02009-06-30 02:36:12 +0000165 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
166 i != e; ++i) {
Eli Friedmana1e6de92009-06-13 21:37:10 +0000167 const FieldDecl *FD = *i;
168
169 if (FD->getType()->isVectorType() &&
170 Context.getTypeSize(FD->getType()) >= 128)
171 return true;
172
Ted Kremenek6217b802009-07-29 21:53:49 +0000173 if (const RecordType* RT = FD->getType()->getAs<RecordType>())
Eli Friedmana1e6de92009-06-13 21:37:10 +0000174 if (typeContainsSSEVector(RT->getDecl(), Context))
175 return true;
176 }
177
178 return false;
179}
180
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and then every argument in turn.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  // When set, small vectors may be returned in registers (Darwin convention).
  bool IsDarwinVectorABI;
  // When set, register-sized structs may be returned in registers.
  bool IsSmallStructInRegABI;

  // Returns true for sizes that fit a general purpose register pair scheme.
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  static unsigned getIndirectArgumentAlignment(QualType Ty,
                                               ASTContext &Context);

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and then every argument in turn.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  // d: Darwin vector-return convention; p: small-struct-in-register return.
  X86_32ABIInfo(ASTContext &Context, bool d, bool p)
    : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
      IsSmallStructInRegABI(p) {}
};
}
249
250
251/// shouldReturnTypeInRegister - Determine if the given type should be
252/// passed in a register (for the Darwin ABI).
253bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
254 ASTContext &Context) {
255 uint64_t Size = Context.getTypeSize(Ty);
256
257 // Type must be register sized.
258 if (!isRegisterSize(Size))
259 return false;
260
261 if (Ty->isVectorType()) {
262 // 64- and 128- bit vectors inside structures are not returned in
263 // registers.
264 if (Size == 64 || Size == 128)
265 return false;
266
267 return true;
268 }
269
270 // If this is a builtin, pointer, or complex type, it is ok.
271 if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
272 return true;
273
274 // Arrays are treated like records.
275 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
276 return shouldReturnTypeInRegister(AT->getElementType(), Context);
277
278 // Otherwise, it must be a record type.
Ted Kremenek6217b802009-07-29 21:53:49 +0000279 const RecordType *RT = Ty->getAs<RecordType>();
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000280 if (!RT) return false;
281
282 // Structure types are passed in register if all fields would be
283 // passed in a register.
Argyrios Kyrtzidis17945a02009-06-30 02:36:12 +0000284 for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
285 e = RT->getDecl()->field_end(); i != e; ++i) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000286 const FieldDecl *FD = *i;
287
288 // Empty fields are ignored.
289 if (isEmptyField(Context, FD))
290 continue;
291
292 // Check fields recursively.
293 if (!shouldReturnTypeInRegister(FD->getType(), Context))
294 return false;
295 }
296
297 return true;
298}
299
/// classifyReturnType - Decide how a value of type \arg RetTy is returned
/// under the X86-32 ABI: ignored (void), direct, extended, coerced to an
/// LLVM type, or returned indirectly through memory.
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context,
                                             llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAsVectorType()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(VMContext), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = RetTy->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure, padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(
            llvm::IntegerType::get(VMContext, (unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
      uint64_t Size = Context.getTypeSize(RetTy);
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    // Scalars: promotable integers are sign/zero extended, the rest pass
    // through directly.
    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}
386
Eli Friedmana1e6de92009-06-13 21:37:10 +0000387unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
388 ASTContext &Context) {
389 unsigned Align = Context.getTypeAlign(Ty);
390 if (Align < 128) return 0;
Ted Kremenek6217b802009-07-29 21:53:49 +0000391 if (const RecordType* RT = Ty->getAs<RecordType>())
Eli Friedmana1e6de92009-06-13 21:37:10 +0000392 if (typeContainsSSEVector(RT->getDecl(), Context))
393 return 16;
394 return 0;
395}
396
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000397ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
Owen Andersona1cf15f2009-07-14 23:10:40 +0000398 ASTContext &Context,
399 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000400 // FIXME: Set alignment on indirect arguments.
401 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
402 // Structures with flexible arrays are always indirect.
403 if (const RecordType *RT = Ty->getAsStructureType())
404 if (RT->getDecl()->hasFlexibleArrayMember())
Eli Friedmana1e6de92009-06-13 21:37:10 +0000405 return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
406 Context));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000407
408 // Ignore empty structs.
Eli Friedmana1e6de92009-06-13 21:37:10 +0000409 if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000410 return ABIArgInfo::getIgnore();
411
412 // Expand structs with size <= 128-bits which consist only of
413 // basic types (int, long long, float, double, xxx*). This is
414 // non-recursive and does not ignore empty fields.
415 if (const RecordType *RT = Ty->getAsStructureType()) {
416 if (Context.getTypeSize(Ty) <= 4*32 &&
417 areAllFields32Or64BitBasicType(RT->getDecl(), Context))
418 return ABIArgInfo::getExpand();
419 }
420
Eli Friedmana1e6de92009-06-13 21:37:10 +0000421 return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000422 } else {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +0000423 return (Ty->isPromotableIntegerType() ?
424 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000425 }
426}
427
428llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
429 CodeGenFunction &CGF) const {
Owen Anderson0032b272009-08-13 21:57:51 +0000430 const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
Owen Anderson96e0fc72009-07-29 22:16:19 +0000431 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000432
433 CGBuilderTy &Builder = CGF.Builder;
434 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
435 "ap");
436 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
437 llvm::Type *PTy =
Owen Anderson96e0fc72009-07-29 22:16:19 +0000438 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000439 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
440
441 uint64_t Offset =
442 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
443 llvm::Value *NextAddr =
Owen Anderson0032b272009-08-13 21:57:51 +0000444 Builder.CreateGEP(Addr, llvm::ConstantInt::get(
445 llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000446 "ap.next");
447 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
448
449 return AddrTyped;
450}
451
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  // Register classes from the AMD64 psABI classification algorithm.
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, chose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
  /// the ways we might want to pass things, instead of constructing an LLVM
  /// type. This makes this code more explicit, and it makes it clearer that we
  /// are also doing this for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty,
                               ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  // neededInt/neededSSE report how many GP / SSE registers the argument
  // consumes, so computeInfo can track remaining register budget.
  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}
537
/// merge - Combine two register classes per AMD64-ABI 3.2.3p2 Rule 4; the
/// if-chain below encodes rules (a)-(f) in order, so the order of the tests
/// is significant.
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
                                          Class Field) const {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  else if (Field == Memory)
    return Memory;
  else if (Accum == NoClass)
    return Field;
  else if (Accum == Integer || Field == Integer)
    return Integer;
  else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
           Accum == X87 || Accum == X87Up)
    return Memory;
  else
    return SSE;
}
579
/// classify - Implement the AMD64 psABI classification algorithm for type
/// \arg Ty at bit offset \arg OffsetBase, writing the low/high eightbyte
/// classes into \arg Lo and \arg Hi (see the declaration for the contract).
void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  // "Current" aliases whichever eightbyte this offset falls into; default it
  // to Memory so unhandled types conservatively go to memory.
  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
  } else if (const EnumType *ET = Ty->getAsEnumType()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceeded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
798
799ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
800 const llvm::Type *CoerceTo,
801 ASTContext &Context) const {
Owen Anderson0032b272009-08-13 21:57:51 +0000802 if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000803 // Integer and pointer types will end up in a general purpose
804 // register.
805 if (Ty->isIntegralType() || Ty->isPointerType())
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +0000806 return (Ty->isPromotableIntegerType() ?
807 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Owen Anderson0032b272009-08-13 21:57:51 +0000808 } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000809 // FIXME: It would probably be better to make CGFunctionInfo only map using
810 // canonical types than to canonize here.
811 QualType CTy = Context.getCanonicalType(Ty);
812
813 // Float and double end up in a single SSE reg.
814 if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
815 return ABIArgInfo::getDirect();
816
817 }
818
819 return ABIArgInfo::getCoerce(CoerceTo);
820}
821
822ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
823 ASTContext &Context) const {
824 // If this is a scalar LLVM value then assume LLVM will pass it in the right
825 // place naturally.
826 if (!CodeGenFunction::hasAggregateLLVMType(Ty))
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +0000827 return (Ty->isPromotableIntegerType() ?
828 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +0000829
830 // FIXME: Set alignment correctly.
831 return ABIArgInfo::getIndirect(0);
832}
833
/// classifyReturnType - Decide how a value of type \arg RetTy is returned
/// on x86-64: classify it into a {Lo, Hi} eightbyte class pair
/// (AMD64-ABI 3.2.3p4), map each class to an LLVM IR type, and let
/// getCoerceResult pick direct/extend/coerce for the final answer.
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                            ASTContext &Context,
                                            llvm::LLVMContext &VMContext) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return getIndirectResult(RetTy, Context);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::getInt64Ty(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::getDoubleTy(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext),
                                    llvm::Type::getX86_FP80Ty(VMContext),
                                    NULL);
    break;
  }

  // Augment ResType for the high eightbyte, if any.
  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the upper half of the last used SSE register.
  //
  // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(VMContext, ResType,
                                      llvm::Type::getDoubleTy(VMContext), NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}
930
/// classifyArgumentType - Decide how a single argument of type \arg Ty is
/// passed on x86-64 (AMD64-ABI 3.2.3p3). On return, \arg neededInt and
/// \arg neededSSE hold the number of general purpose and SSE registers the
/// argument consumes; both stay zero when it is ignored or passed in
/// memory, which is what the caller's register-accounting relies on.
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               llvm::LLVMContext &VMContext,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return getIndirectResult(Ty, Context);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::getInt64Ty(VMContext);
    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used, the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::getDoubleTy(VMContext);
    break;
  }

  // Augment ResType for the high eightbyte, if any.
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceded by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    ++neededInt;
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;
  }

  return getCoerceResult(Ty, ResType, Context);
}
1019
Owen Andersona1cf15f2009-07-14 23:10:40 +00001020void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1021 llvm::LLVMContext &VMContext) const {
1022 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
1023 Context, VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001024
1025 // Keep track of the number of assigned registers.
1026 unsigned freeIntRegs = 6, freeSSERegs = 8;
1027
1028 // If the return value is indirect, then the hidden argument is consuming one
1029 // integer register.
1030 if (FI.getReturnInfo().isIndirect())
1031 --freeIntRegs;
1032
1033 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1034 // get assigned (in left-to-right order) for passing as follows...
1035 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1036 it != ie; ++it) {
1037 unsigned neededInt, neededSSE;
Owen Andersona1cf15f2009-07-14 23:10:40 +00001038 it->info = classifyArgumentType(it->type, Context, VMContext,
1039 neededInt, neededSSE);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001040
1041 // AMD64-ABI 3.2.3p3: If there are no registers available for any
1042 // eightbyte of an argument, the whole argument is passed on the
1043 // stack. If registers have already been assigned for some
1044 // eightbytes of such an argument, the assignments get reverted.
1045 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
1046 freeIntRegs -= neededInt;
1047 freeSSERegs -= neededSSE;
1048 } else {
1049 it->info = getIndirectResult(it->type, Context);
1050 }
1051 }
1052}
1053
/// EmitVAArgFromMemory - Emit IR that fetches the next va_arg value of type
/// \arg Ty from the overflow_arg_area of an x86-64 va_list (AMD64-ABI
/// 3.5.7p5, steps 7-11): align the area if needed, compute the value's
/// address, then bump the stored pointer past the object. Returns a
/// pointer to the fetched value.
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    // The rounding mask must be applied to the integer form of the
    // pointer, so round-trip through ptrtoint/inttoptr.
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                    llvm::Type::getInt64Ty(CGF.getLLVMContext()));
    llvm::Value *Mask = llvm::ConstantInt::get(
        llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
    llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
                           (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
1106
/// EmitVAArg - Emit IR implementing va_arg for the x86-64 ABI (AMD64-ABI
/// 3.5.7p5). Arguments classified into registers are loaded from the
/// va_list's reg_save_area (reassembled into a temporary when they span
/// both GPR and SSE registers); otherwise they come from the overflow area
/// via EmitVAArgFromMemory. Control merges into a phi yielding the address
/// of the fetched value.
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::LLVMContext &VMContext = CGF.getLLVMContext();

  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space).

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    // When the argument needs both GPR and SSE space, it is in registers
    // only if both offset checks succeed.
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // The two eightbytes live in different register files; copy each half
    // into a temporary struct and hand back the temporary's address.
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area, we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(llvm::Type::getDoubleTy(VMContext));
      const llvm::StructType *ST = llvm::StructType::get(VMContext,
                                                         llvm::Type::getDoubleTy(VMContext),
                                                         llvm::Type::getDoubleTy(VMContext),
                                                         NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                                 neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                                 neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}
1277
// ABI Info for PIC16
//
// The PIC16 convention implemented here is trivial: everything is passed
// and returned directly (see the out-of-line definitions), and va_arg
// lowering is a stub.
class PIC16ABIInfo : public ABIInfo {
  // Classify how a value of type \arg RetTy is returned.
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  // Classify how a single argument is passed.
  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Compute ABI information for the return value and every argument of FI.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

};
1301
1302ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001303 ASTContext &Context,
1304 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001305 if (RetTy->isVoidType()) {
1306 return ABIArgInfo::getIgnore();
1307 } else {
1308 return ABIArgInfo::getDirect();
1309 }
1310}
1311
/// classifyArgumentType - On PIC16 every argument is passed directly.
ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context,
                                              llvm::LLVMContext &VMContext) const {
  return ABIArgInfo::getDirect();
}
1317
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  // FIXME: Implement. va_arg lowering is a stub for PIC16; a null value is
  // returned to the caller.
  return 0;
}
1322
1323class ARMABIInfo : public ABIInfo {
1324 ABIArgInfo classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001325 ASTContext &Context,
1326 llvm::LLVMContext &VMCOntext) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001327
1328 ABIArgInfo classifyArgumentType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001329 ASTContext &Context,
1330 llvm::LLVMContext &VMContext) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001331
Owen Andersona1cf15f2009-07-14 23:10:40 +00001332 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1333 llvm::LLVMContext &VMContext) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001334
1335 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1336 CodeGenFunction &CGF) const;
1337};
1338
Owen Andersona1cf15f2009-07-14 23:10:40 +00001339void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1340 llvm::LLVMContext &VMContext) const {
1341 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
1342 VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001343 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1344 it != ie; ++it) {
Owen Andersona1cf15f2009-07-14 23:10:40 +00001345 it->info = classifyArgumentType(it->type, Context, VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001346 }
1347}
1348
1349ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001350 ASTContext &Context,
1351 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001352 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001353 return (Ty->isPromotableIntegerType() ?
1354 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001355 }
1356 // FIXME: This is kind of nasty... but there isn't much choice because the ARM
1357 // backend doesn't support byval.
1358 // FIXME: This doesn't handle alignment > 64 bits.
1359 const llvm::Type* ElemTy;
1360 unsigned SizeRegs;
1361 if (Context.getTypeAlign(Ty) > 32) {
Owen Anderson0032b272009-08-13 21:57:51 +00001362 ElemTy = llvm::Type::getInt64Ty(VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001363 SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
1364 } else {
Owen Anderson0032b272009-08-13 21:57:51 +00001365 ElemTy = llvm::Type::getInt32Ty(VMContext);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001366 SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
1367 }
1368 std::vector<const llvm::Type*> LLVMFields;
Owen Anderson96e0fc72009-07-29 22:16:19 +00001369 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
Owen Anderson47a434f2009-08-05 23:18:46 +00001370 const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001371 return ABIArgInfo::getCoerce(STy);
1372}
1373
1374ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001375 ASTContext &Context,
1376 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001377 if (RetTy->isVoidType()) {
1378 return ABIArgInfo::getIgnore();
1379 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1380 // Aggregates <= 4 bytes are returned in r0; other aggregates
1381 // are returned indirectly.
1382 uint64_t Size = Context.getTypeSize(RetTy);
1383 if (Size <= 32)
Owen Anderson0032b272009-08-13 21:57:51 +00001384 return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001385 return ABIArgInfo::getIndirect(0);
1386 } else {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001387 return (RetTy->isPromotableIntegerType() ?
1388 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001389 }
1390}
1391
1392llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1393 CodeGenFunction &CGF) const {
1394 // FIXME: Need to handle alignment
Owen Anderson0032b272009-08-13 21:57:51 +00001395 const llvm::Type *BP =
1396 llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
Owen Anderson96e0fc72009-07-29 22:16:19 +00001397 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001398
1399 CGBuilderTy &Builder = CGF.Builder;
1400 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
1401 "ap");
1402 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
1403 llvm::Type *PTy =
Owen Anderson96e0fc72009-07-29 22:16:19 +00001404 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001405 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
1406
1407 uint64_t Offset =
1408 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
1409 llvm::Value *NextAddr =
Owen Anderson0032b272009-08-13 21:57:51 +00001410 Builder.CreateGEP(Addr, llvm::ConstantInt::get(
1411 llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001412 "ap.next");
1413 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
1414
1415 return AddrTyped;
1416}
1417
1418ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001419 ASTContext &Context,
1420 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001421 if (RetTy->isVoidType()) {
1422 return ABIArgInfo::getIgnore();
1423 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1424 return ABIArgInfo::getIndirect(0);
1425 } else {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001426 return (RetTy->isPromotableIntegerType() ?
1427 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001428 }
1429}
1430
Anton Korobeynikov89e887f2009-07-16 20:09:57 +00001431namespace {
1432class SystemZABIInfo : public ABIInfo {
1433 bool isPromotableIntegerType(QualType Ty) const;
1434
1435 ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
1436 llvm::LLVMContext &VMContext) const;
1437
1438 ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
1439 llvm::LLVMContext &VMContext) const;
1440
1441 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1442 llvm::LLVMContext &VMContext) const {
1443 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
1444 Context, VMContext);
1445 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1446 it != ie; ++it)
1447 it->info = classifyArgumentType(it->type, Context, VMContext);
1448 }
1449
1450 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1451 CodeGenFunction &CGF) const;
1452};
1453}
1454
1455bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
1456 // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
1457 if (const BuiltinType *BT = Ty->getAsBuiltinType())
1458 switch (BT->getKind()) {
1459 case BuiltinType::Bool:
1460 case BuiltinType::Char_S:
1461 case BuiltinType::Char_U:
1462 case BuiltinType::SChar:
1463 case BuiltinType::UChar:
1464 case BuiltinType::Short:
1465 case BuiltinType::UShort:
1466 case BuiltinType::Int:
1467 case BuiltinType::UInt:
1468 return true;
1469 default:
1470 return false;
1471 }
1472 return false;
1473}
1474
/// EmitVAArg - Emit code to load a va_arg value of type Ty from VAListAddr.
/// Unimplemented for SystemZ: always returns a null Value* for now
/// (presumably callers have a fallback for a null result -- TODO confirm).
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}
1480
1481
1482ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
1483 ASTContext &Context,
1484 llvm::LLVMContext &VMContext) const {
1485 if (RetTy->isVoidType()) {
1486 return ABIArgInfo::getIgnore();
1487 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1488 return ABIArgInfo::getIndirect(0);
1489 } else {
1490 return (isPromotableIntegerType(RetTy) ?
1491 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1492 }
1493}
1494
1495ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
1496 ASTContext &Context,
1497 llvm::LLVMContext &VMContext) const {
1498 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
1499 return ABIArgInfo::getIndirect(0);
1500 } else {
1501 return (isPromotableIntegerType(Ty) ?
1502 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1503 }
1504}
1505
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001506ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
Owen Andersona1cf15f2009-07-14 23:10:40 +00001507 ASTContext &Context,
1508 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001509 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
1510 return ABIArgInfo::getIndirect(0);
1511 } else {
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001512 return (Ty->isPromotableIntegerType() ?
1513 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001514 }
1515}
1516
/// EmitVAArg - Emit code to load a va_arg value of type Ty from VAListAddr.
/// The default ABI provides no target-specific lowering and always returns
/// a null Value* (presumably callers fall back on generic handling --
/// TODO confirm).
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}
1521
1522const ABIInfo &CodeGenTypes::getABIInfo() const {
1523 if (TheABIInfo)
1524 return *TheABIInfo;
1525
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001526 // For now we just cache the ABIInfo in CodeGenTypes and don't free it.
1527
Daniel Dunbar1752ee42009-08-24 09:10:05 +00001528 const llvm::Triple &Triple(getContext().Target.getTriple());
1529 switch (Triple.getArch()) {
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001530 default:
1531 return *(TheABIInfo = new DefaultABIInfo);
1532
Daniel Dunbar1752ee42009-08-24 09:10:05 +00001533 case llvm::Triple::x86:
1534 if (Triple.getOS() == llvm::Triple::Darwin)
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001535 return *(TheABIInfo = new X86_32ABIInfo(Context, true, true));
1536
Daniel Dunbar1752ee42009-08-24 09:10:05 +00001537 switch (Triple.getOS()) {
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001538 case llvm::Triple::Cygwin:
1539 case llvm::Triple::DragonFly:
1540 case llvm::Triple::MinGW32:
1541 case llvm::Triple::MinGW64:
1542 case llvm::Triple::NetBSD:
1543 case llvm::Triple::OpenBSD:
1544 return *(TheABIInfo = new X86_32ABIInfo(Context, false, true));
1545
1546 default:
1547 return *(TheABIInfo = new X86_32ABIInfo(Context, false, false));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001548 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001549
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00001550 case llvm::Triple::x86_64:
1551 return *(TheABIInfo = new X86_64ABIInfo());
1552
1553 case llvm::Triple::arm:
1554 // FIXME: Support for OABI?
1555 return *(TheABIInfo = new ARMABIInfo());
1556
1557 case llvm::Triple::pic16:
1558 return *(TheABIInfo = new PIC16ABIInfo());
1559
1560 case llvm::Triple::systemz:
1561 return *(TheABIInfo = new SystemZABIInfo());
1562 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001563}