blob: 1ebad5dd5c2461e715d85cff22a4ffcafb802048 [file] [log] [blame]
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001//===---- TargetABIInfo.cpp - Encapsulate target ABI details ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "ABIInfo.h"
16#include "CodeGenFunction.h"
Anders Carlsson15b73de2009-07-18 19:43:29 +000017#include "clang/AST/RecordLayout.h"
Anton Korobeynikov244360d2009-06-05 22:08:42 +000018#include "llvm/Type.h"
Daniel Dunbare3532f82009-08-24 08:52:16 +000019#include "llvm/ADT/Triple.h"
Torok Edwindb714922009-08-24 13:25:12 +000020#include <cstdio>
Anton Korobeynikov244360d2009-06-05 22:08:42 +000021
22using namespace clang;
23using namespace CodeGen;
24
// Out-of-line, intentionally empty destructor for the ABIInfo base class.
ABIInfo::~ABIInfo() {}
26
27void ABIArgInfo::dump() const {
28 fprintf(stderr, "(ABIArgInfo Kind=");
29 switch (TheKind) {
30 case Direct:
31 fprintf(stderr, "Direct");
32 break;
Anton Korobeynikov18adbf52009-06-06 09:36:29 +000033 case Extend:
34 fprintf(stderr, "Extend");
35 break;
Anton Korobeynikov244360d2009-06-05 22:08:42 +000036 case Ignore:
37 fprintf(stderr, "Ignore");
38 break;
39 case Coerce:
40 fprintf(stderr, "Coerce Type=");
41 getCoerceToType()->print(llvm::errs());
42 break;
43 case Indirect:
44 fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
45 break;
46 case Expand:
47 fprintf(stderr, "Expand");
48 break;
49 }
50 fprintf(stderr, ")\n");
51}
52
53static bool isEmptyRecord(ASTContext &Context, QualType T);
54
55/// isEmptyField - Return true iff a the field is "empty", that is it
56/// is an unnamed bit-field or an (array of) empty record(s).
57static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) {
58 if (FD->isUnnamedBitfield())
59 return true;
60
61 QualType FT = FD->getType();
62 // Constant arrays of empty records count as empty, strip them off.
63 while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
64 FT = AT->getElementType();
65
66 return isEmptyRecord(Context, FT);
67}
68
69/// isEmptyRecord - Return true iff a structure contains only empty
70/// fields. Note that a structure with a flexible array member is not
71/// considered empty.
72static bool isEmptyRecord(ASTContext &Context, QualType T) {
Ted Kremenekc23c7e62009-07-29 21:53:49 +000073 const RecordType *RT = T->getAs<RecordType>();
Anton Korobeynikov244360d2009-06-05 22:08:42 +000074 if (!RT)
75 return 0;
76 const RecordDecl *RD = RT->getDecl();
77 if (RD->hasFlexibleArrayMember())
78 return false;
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +000079 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
80 i != e; ++i)
Anton Korobeynikov244360d2009-06-05 22:08:42 +000081 if (!isEmptyField(Context, *i))
82 return false;
83 return true;
84}
85
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists;
/// null otherwise.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  // Only structure types qualify.
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      // A scalar field: candidate single element.
      Found = FT.getTypePtr();
    } else {
      // An aggregate field only qualifies if it is itself a
      // single-element struct; recurse.
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
136
137static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
138 if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
139 return false;
140
141 uint64_t Size = Context.getTypeSize(Ty);
142 return Size == 32 || Size == 64;
143}
144
145static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
146 ASTContext &Context) {
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +0000147 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
148 i != e; ++i) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000149 const FieldDecl *FD = *i;
150
151 if (!is32Or64BitBasicType(FD->getType(), Context))
152 return false;
153
154 // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
155 // how to expand them yet, and the predicate for telling if a bitfield still
156 // counts as "basic" is more complicated than what we were doing previously.
157 if (FD->isBitField())
158 return false;
159 }
160
161 return true;
162}
163
Eli Friedman3192cc82009-06-13 21:37:10 +0000164static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +0000165 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
166 i != e; ++i) {
Eli Friedman3192cc82009-06-13 21:37:10 +0000167 const FieldDecl *FD = *i;
168
169 if (FD->getType()->isVectorType() &&
170 Context.getTypeSize(FD->getType()) >= 128)
171 return true;
172
Ted Kremenekc23c7e62009-07-29 21:53:49 +0000173 if (const RecordType* RT = FD->getType()->getAs<RecordType>())
Eli Friedman3192cc82009-06-13 21:37:10 +0000174 if (typeContainsSSEVector(RT->getDecl(), Context))
175 return true;
176 }
177
178 return false;
179}
180
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and every argument type of the function.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  // Whether to use the Darwin rules for returning vectors in registers
  // (see classifyReturnType).
  bool IsDarwinVectorABI;
  // Whether small register-sized structs may be returned in registers
  // rather than indirectly (see classifyReturnType).
  bool IsSmallStructInRegABI;

  // Return true iff Size (in bits) fits a general purpose register.
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  static unsigned getIndirectArgumentAlignment(QualType Ty,
                                               ASTContext &Context);

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and every argument type of the function.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  // \param d - enable the Darwin vector return ABI.
  // \param p - enable returning small structs in registers.
  X86_32ABIInfo(ASTContext &Context, bool d, bool p)
    : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
      IsSmallStructInRegABI(p) {}
};
}
249
250
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI). Recurses through arrays
/// and record fields.
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, or complex type, it is ok.
  if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}
299
/// classifyReturnType - Decide how a value of type \arg RetTy is returned
/// under the x86-32 ABI: ignored, direct/extended in a register, coerced to
/// an LLVM type, or returned indirectly through memory.
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context,
                                             llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAsVectorType()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(VMContext), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = RetTy->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure, padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(
            llvm::IntegerType::get(VMContext, (unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        // pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        // Other vector sizes: classify the element as if returned directly.
        return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
      }
    }

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
      uint64_t Size = Context.getTypeSize(RetTy);
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    // Scalar: promotable integers are sign/zero extended, everything else
    // is passed directly.
    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}
386
Eli Friedman3192cc82009-06-13 21:37:10 +0000387unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
388 ASTContext &Context) {
389 unsigned Align = Context.getTypeAlign(Ty);
390 if (Align < 128) return 0;
Ted Kremenekc23c7e62009-07-29 21:53:49 +0000391 if (const RecordType* RT = Ty->getAs<RecordType>())
Eli Friedman3192cc82009-06-13 21:37:10 +0000392 if (typeContainsSSEVector(RT->getDecl(), Context))
393 return 16;
394 return 0;
395}
396
/// classifyArgumentType - Decide how a value of type \arg Ty is passed as an
/// argument under the x86-32 ABI: ignored, expanded into scalar pieces,
/// direct/extended, or passed indirectly through memory.
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context,
                                               llvm::LLVMContext &VMContext) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
                                                                    Context));

    // Ignore empty structs.
    if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    // Expand structs with size <= 128-bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Context.getTypeSize(Ty) <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    // All other aggregates are passed indirectly.
    return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
  } else {
    // Scalar: promotable integers are sign/zero extended, everything else
    // is passed directly.
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
}
427
/// EmitVAArg - Emit IR to fetch the next va_arg value of type \arg Ty from
/// the va_list at \arg VAListAddr, advancing the list past it.
/// \return The address of the argument, cast to a pointer to \arg Ty.
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  // Treat the va_list as an i8** and load the current argument pointer.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance the pointer by the argument's size rounded up to the 4-byte
  // stack slot size, and store it back into the va_list.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
451
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  // Register classes used by the AMD64 ABI classification algorithm
  // (AMD64-ABI 3.2.3); see merge() and classify() below.
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, chose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
  /// the ways we might want to pass things, instead of constructing an LLVM
  /// type. This makes this code more explicit, and it makes it clearer that we
  /// are also doing this for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty,
                               ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  // \param neededInt - Out: number of GP registers the argument consumes.
  // \param neededSSE - Out: number of SSE registers the argument consumes.
  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}
537
538X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
539 Class Field) const {
540 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
541 // classified recursively so that always two fields are
542 // considered. The resulting class is calculated according to
543 // the classes of the fields in the eightbyte:
544 //
545 // (a) If both classes are equal, this is the resulting class.
546 //
547 // (b) If one of the classes is NO_CLASS, the resulting class is
548 // the other class.
549 //
550 // (c) If one of the classes is MEMORY, the result is the MEMORY
551 // class.
552 //
553 // (d) If one of the classes is INTEGER, the result is the
554 // INTEGER.
555 //
556 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
557 // MEMORY is used as class.
558 //
559 // (f) Otherwise class SSE is used.
560
561 // Accum should never be memory (we should have returned) or
562 // ComplexX87 (because this cannot be passed in a structure).
563 assert((Accum != Memory && Accum != ComplexX87) &&
564 "Invalid accumulated classification during merge.");
565 if (Accum == Field || Field == NoClass)
566 return Accum;
567 else if (Field == Memory)
568 return Memory;
569 else if (Accum == NoClass)
570 return Field;
571 else if (Accum == Integer || Field == Integer)
572 return Integer;
573 else if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
574 Accum == X87 || Accum == X87Up)
575 return Memory;
576 else
577 return SSE;
578}
579
/// classify - Implement the AMD64-ABI classification algorithm for \arg Ty,
/// producing the register class of its low eightbyte in \arg Lo and of its
/// high eightbyte in \arg Hi. See the declaration for the full contract.
void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  // "Current" is whichever eightbyte this type starts in; it defaults to
  // Memory so that any unhandled case below is conservatively passed in
  // memory (the early `return`s rely on this).
  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
  } else if (const EnumType *ET = Ty->getAsEnumType()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceeded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
798
799ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
800 const llvm::Type *CoerceTo,
801 ASTContext &Context) const {
Owen Anderson41a75022009-08-13 21:57:51 +0000802 if (CoerceTo == llvm::Type::getInt64Ty(CoerceTo->getContext())) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000803 // Integer and pointer types will end up in a general purpose
804 // register.
805 if (Ty->isIntegralType() || Ty->isPointerType())
Anton Korobeynikov18adbf52009-06-06 09:36:29 +0000806 return (Ty->isPromotableIntegerType() ?
807 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Owen Anderson41a75022009-08-13 21:57:51 +0000808 } else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000809 // FIXME: It would probably be better to make CGFunctionInfo only map using
810 // canonical types than to canonize here.
811 QualType CTy = Context.getCanonicalType(Ty);
812
813 // Float and double end up in a single SSE reg.
814 if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
815 return ABIArgInfo::getDirect();
816
817 }
818
819 return ABIArgInfo::getCoerce(CoerceTo);
820}
821
822ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
823 ASTContext &Context) const {
824 // If this is a scalar LLVM value then assume LLVM will pass it in the right
825 // place naturally.
826 if (!CodeGenFunction::hasAggregateLLVMType(Ty))
Anton Korobeynikov18adbf52009-06-06 09:36:29 +0000827 return (Ty->isPromotableIntegerType() ?
828 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +0000829
830 // FIXME: Set alignment correctly.
831 return ABIArgInfo::getIndirect(0);
832}
833
/// classifyReturnType - Decide how a value of type \arg RetTy is returned
/// on x86-64, following the AMD64 ABI classification (SysV AMD64 ABI
/// 3.2.3p4).  Returns Ignore for NO_CLASS, Indirect (sret) for MEMORY,
/// and otherwise an info whose LLVM type mirrors the register (pair)
/// chosen for the low/high eightbytes.
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context,
                                             llvm::LLVMContext &VMContext) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  // ResType accumulates the LLVM type for the low eightbyte; the Hi switch
  // below may widen it into a two-element struct or a vector.
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectResult(RetTy, Context);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::getInt64Ty(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::getDoubleTy(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(VMContext); break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext),
                                    llvm::Type::getX86_FP80Ty(VMContext),
                                    NULL);
    break;
  }

  // Fold the high eightbyte's class into ResType.
  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the upper half of the last used SSE register.
  //
  // SSEUP should always be preceeded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceeded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceeded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(VMContext, ResType,
                                      llvm::Type::getDoubleTy(VMContext), NULL);
    break;
  }

  // Collapse trivial coercions (e.g. i64 carrying an int) into
  // Direct/Extend.
  return getCoerceResult(RetTy, ResType, Context);
}
930
/// classifyArgumentType - Decide how an argument of type \arg Ty is passed
/// on x86-64 (SysV AMD64 ABI 3.2.3p3).  On return, \arg neededInt and
/// \arg neededSSE hold the number of general purpose and SSE registers
/// this argument would consume; the caller uses them to decide whether
/// enough registers remain or the argument must be demoted to memory.
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               llvm::LLVMContext &VMContext,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  // ResType accumulates the LLVM type for the low eightbyte; the Hi switch
  // below may widen it into a two-element struct or a vector.
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return getIndirectResult(Ty, Context);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::getInt64Ty(VMContext);
    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used, the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::getDoubleTy(VMContext);
    break;
  }

  // Fold the high eightbyte's class into ResType, counting the extra
  // register it consumes.
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceed by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getInt64Ty(VMContext), NULL);
    ++neededInt;
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(VMContext, ResType,
                                    llvm::Type::getDoubleTy(VMContext), NULL);
    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
    break;
  }

  // Collapse trivial coercions (e.g. i64 carrying an int) into
  // Direct/Extend.
  return getCoerceResult(Ty, ResType, Context);
}
1019
Owen Anderson170229f2009-07-14 23:10:40 +00001020void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1021 llvm::LLVMContext &VMContext) const {
1022 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
1023 Context, VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001024
1025 // Keep track of the number of assigned registers.
1026 unsigned freeIntRegs = 6, freeSSERegs = 8;
1027
1028 // If the return value is indirect, then the hidden argument is consuming one
1029 // integer register.
1030 if (FI.getReturnInfo().isIndirect())
1031 --freeIntRegs;
1032
1033 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1034 // get assigned (in left-to-right order) for passing as follows...
1035 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1036 it != ie; ++it) {
1037 unsigned neededInt, neededSSE;
Mike Stump11289f42009-09-09 15:08:12 +00001038 it->info = classifyArgumentType(it->type, Context, VMContext,
Owen Anderson170229f2009-07-14 23:10:40 +00001039 neededInt, neededSSE);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001040
1041 // AMD64-ABI 3.2.3p3: If there are no registers available for any
1042 // eightbyte of an argument, the whole argument is passed on the
1043 // stack. If registers have already been assigned for some
1044 // eightbytes of such an argument, the assignments get reverted.
1045 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
1046 freeIntRegs -= neededInt;
1047 freeSSERegs -= neededSSE;
1048 } else {
1049 it->info = getIndirectResult(it->type, Context);
1050 }
1051 }
1052}
1053
/// EmitVAArgFromMemory - Emit code to fetch a va_arg value of type \arg Ty
/// from the overflow_arg_area of an x86-64 va_list, and bump the overflow
/// pointer past the fetched argument (AMD64-ABI 3.5.7p5 steps 7-11).
/// Returns the argument's address cast to a pointer to the memory type of
/// \arg Ty.
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  // Field 2 of the va_list struct is the i8* overflow_arg_area.
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset =
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                 llvm::Type::getInt64Ty(CGF.getLLVMContext()));
    llvm::Value *Mask = llvm::ConstantInt::get(
        llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  // Round the type size up to a multiple of 8 bytes before advancing.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
    llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
                                               (SizeInBytes + 7)  & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
1106
/// EmitVAArg - Lower va_arg for x86-64 (AMD64-ABI 3.5.7p5): emit a runtime
/// check of the va_list's gp/fp offsets, load the value from the register
/// save area when the registers it needs are still available, otherwise
/// fall back to the overflow (stack) area.  Returns the address of the
/// fetched argument (a phi over the two paths).
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::LLVMContext &VMContext = CGF.getLLVMContext();
  const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
  const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);

  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space).

  // InRegs is the runtime i1 "all needed registers are available".
  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(i32Ty,
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(i32Ty,
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // Mixed GP/SSE: copy both eightbytes into a temporary struct so the
    // result is addressable as one object.
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area, we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(i32Ty, 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(DoubleTy);
      const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
                                                         DoubleTy, NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}
1275
Daniel Dunbard59655c2009-09-12 00:59:49 +00001276// PIC16 ABI Implementation
1277
namespace {

/// PIC16ABIInfo - ABI handling for the PIC16 target.  Per the
/// implementations below, everything is passed and returned directly
/// (void returns are ignored), and va_arg lowering is not implemented.
class PIC16ABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type, then each argument in turn.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
                                            VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

}
1303
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001304ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +00001305 ASTContext &Context,
1306 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001307 if (RetTy->isVoidType()) {
1308 return ABIArgInfo::getIgnore();
1309 } else {
1310 return ABIArgInfo::getDirect();
1311 }
1312}
1313
/// classifyArgumentType - On PIC16, every argument is passed directly;
/// no coercion or indirection is applied.
ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
                                              ASTContext &Context,
                                              llvm::LLVMContext &VMContext) const {
  return ABIArgInfo::getDirect();
}
1319
/// EmitVAArg - va_arg lowering for PIC16.
/// FIXME: Not implemented; callers currently receive a null value.
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}
1324
Daniel Dunbard59655c2009-09-12 00:59:49 +00001325// ARM ABI Implementation
1326
1327namespace {
1328
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001329class ARMABIInfo : public ABIInfo {
1330 ABIArgInfo classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +00001331 ASTContext &Context,
1332 llvm::LLVMContext &VMCOntext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001333
1334 ABIArgInfo classifyArgumentType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +00001335 ASTContext &Context,
1336 llvm::LLVMContext &VMContext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001337
Owen Anderson170229f2009-07-14 23:10:40 +00001338 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1339 llvm::LLVMContext &VMContext) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001340
1341 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1342 CodeGenFunction &CGF) const;
1343};
1344
Daniel Dunbard59655c2009-09-12 00:59:49 +00001345}
1346
Owen Anderson170229f2009-07-14 23:10:40 +00001347void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1348 llvm::LLVMContext &VMContext) const {
Mike Stump11289f42009-09-09 15:08:12 +00001349 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
Owen Anderson170229f2009-07-14 23:10:40 +00001350 VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001351 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1352 it != ie; ++it) {
Owen Anderson170229f2009-07-14 23:10:40 +00001353 it->info = classifyArgumentType(it->type, Context, VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001354 }
1355}
1356
1357ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
Owen Anderson170229f2009-07-14 23:10:40 +00001358 ASTContext &Context,
1359 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001360 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001361 return (Ty->isPromotableIntegerType() ?
1362 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001363 }
1364 // FIXME: This is kind of nasty... but there isn't much choice because the ARM
1365 // backend doesn't support byval.
1366 // FIXME: This doesn't handle alignment > 64 bits.
1367 const llvm::Type* ElemTy;
1368 unsigned SizeRegs;
1369 if (Context.getTypeAlign(Ty) > 32) {
Owen Anderson41a75022009-08-13 21:57:51 +00001370 ElemTy = llvm::Type::getInt64Ty(VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001371 SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
1372 } else {
Owen Anderson41a75022009-08-13 21:57:51 +00001373 ElemTy = llvm::Type::getInt32Ty(VMContext);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001374 SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
1375 }
1376 std::vector<const llvm::Type*> LLVMFields;
Owen Anderson9793f0e2009-07-29 22:16:19 +00001377 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
Owen Anderson758428f2009-08-05 23:18:46 +00001378 const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001379 return ABIArgInfo::getCoerce(STy);
1380}
1381
1382ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +00001383 ASTContext &Context,
1384 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001385 if (RetTy->isVoidType()) {
1386 return ABIArgInfo::getIgnore();
1387 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1388 // Aggregates <= 4 bytes are returned in r0; other aggregates
1389 // are returned indirectly.
1390 uint64_t Size = Context.getTypeSize(RetTy);
1391 if (Size <= 32)
Owen Anderson41a75022009-08-13 21:57:51 +00001392 return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001393 return ABIArgInfo::getIndirect(0);
1394 } else {
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001395 return (RetTy->isPromotableIntegerType() ?
1396 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001397 }
1398}
1399
/// EmitVAArg - Lower va_arg for ARM: treat the va_list as a raw i8*,
/// return the current pointer cast to the argument type, and advance the
/// pointer by the argument size rounded up to a 4-byte multiple.
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP =
    llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  // Load the current argument address out of the va_list.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance ap past this argument (size in bytes, 4-byte aligned).
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
1425
1426ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
Owen Anderson170229f2009-07-14 23:10:40 +00001427 ASTContext &Context,
1428 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001429 if (RetTy->isVoidType()) {
1430 return ABIArgInfo::getIgnore();
1431 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1432 return ABIArgInfo::getIndirect(0);
1433 } else {
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001434 return (RetTy->isPromotableIntegerType() ?
1435 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001436 }
1437}
1438
Daniel Dunbard59655c2009-09-12 00:59:49 +00001439// SystemZ ABI Implementation
1440
namespace {

/// SystemZABIInfo - ABI handling for SystemZ.  All 8, 16 and 32 bit
/// integer quantities are extended (see isPromotableIntegerType below);
/// va_arg lowering is not implemented yet.
class SystemZABIInfo : public ABIInfo {
  bool isPromotableIntegerType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type, then each argument in turn.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                          llvm::LLVMContext &VMContext) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                            Context, VMContext);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context, VMContext);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

}
1466
1467bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
1468 // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
1469 if (const BuiltinType *BT = Ty->getAsBuiltinType())
1470 switch (BT->getKind()) {
1471 case BuiltinType::Bool:
1472 case BuiltinType::Char_S:
1473 case BuiltinType::Char_U:
1474 case BuiltinType::SChar:
1475 case BuiltinType::UChar:
1476 case BuiltinType::Short:
1477 case BuiltinType::UShort:
1478 case BuiltinType::Int:
1479 case BuiltinType::UInt:
1480 return true;
1481 default:
1482 return false;
1483 }
1484 return false;
1485}
1486
/// EmitVAArg - va_arg lowering for SystemZ. Not yet implemented; returns a
/// null Value, so va_arg of any type is currently unsupported on this target.
llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // FIXME: Implement
  return 0;
}
1492
1493
1494ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
1495 ASTContext &Context,
Daniel Dunbard59655c2009-09-12 00:59:49 +00001496 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovb5b703b2009-07-16 20:09:57 +00001497 if (RetTy->isVoidType()) {
1498 return ABIArgInfo::getIgnore();
1499 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1500 return ABIArgInfo::getIndirect(0);
1501 } else {
1502 return (isPromotableIntegerType(RetTy) ?
1503 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1504 }
1505}
1506
1507ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
1508 ASTContext &Context,
Daniel Dunbard59655c2009-09-12 00:59:49 +00001509 llvm::LLVMContext &VMContext) const {
Anton Korobeynikovb5b703b2009-07-16 20:09:57 +00001510 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
1511 return ABIArgInfo::getIndirect(0);
1512 } else {
1513 return (isPromotableIntegerType(Ty) ?
1514 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1515 }
1516}
1517
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001518ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
Owen Anderson170229f2009-07-14 23:10:40 +00001519 ASTContext &Context,
1520 llvm::LLVMContext &VMContext) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001521 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
1522 return ABIArgInfo::getIndirect(0);
1523 } else {
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001524 return (Ty->isPromotableIntegerType() ?
1525 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001526 }
1527}
1528
/// EmitVAArg - There is no target-independent va_arg lowering; the default
/// implementation returns a null Value, leaving va_arg unsupported for
/// targets that fall back on DefaultABIInfo.
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}
1533
1534const ABIInfo &CodeGenTypes::getABIInfo() const {
1535 if (TheABIInfo)
1536 return *TheABIInfo;
1537
Daniel Dunbare3532f82009-08-24 08:52:16 +00001538 // For now we just cache the ABIInfo in CodeGenTypes and don't free it.
1539
Daniel Dunbar40165182009-08-24 09:10:05 +00001540 const llvm::Triple &Triple(getContext().Target.getTriple());
1541 switch (Triple.getArch()) {
Daniel Dunbare3532f82009-08-24 08:52:16 +00001542 default:
1543 return *(TheABIInfo = new DefaultABIInfo);
1544
Daniel Dunbard59655c2009-09-12 00:59:49 +00001545 case llvm::Triple::arm:
1546 case llvm::Triple::thumb:
1547 // FIXME: Support for OABI?
1548 return *(TheABIInfo = new ARMABIInfo());
1549
1550 case llvm::Triple::pic16:
1551 return *(TheABIInfo = new PIC16ABIInfo());
1552
1553 case llvm::Triple::systemz:
1554 return *(TheABIInfo = new SystemZABIInfo());
1555
Daniel Dunbar40165182009-08-24 09:10:05 +00001556 case llvm::Triple::x86:
1557 if (Triple.getOS() == llvm::Triple::Darwin)
Daniel Dunbare3532f82009-08-24 08:52:16 +00001558 return *(TheABIInfo = new X86_32ABIInfo(Context, true, true));
1559
Daniel Dunbar40165182009-08-24 09:10:05 +00001560 switch (Triple.getOS()) {
Daniel Dunbare3532f82009-08-24 08:52:16 +00001561 case llvm::Triple::Cygwin:
1562 case llvm::Triple::DragonFly:
1563 case llvm::Triple::MinGW32:
1564 case llvm::Triple::MinGW64:
David Chisnall2c5bef22009-09-03 01:48:05 +00001565 case llvm::Triple::FreeBSD:
Daniel Dunbare3532f82009-08-24 08:52:16 +00001566 case llvm::Triple::OpenBSD:
1567 return *(TheABIInfo = new X86_32ABIInfo(Context, false, true));
1568
1569 default:
1570 return *(TheABIInfo = new X86_32ABIInfo(Context, false, false));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001571 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001572
Daniel Dunbare3532f82009-08-24 08:52:16 +00001573 case llvm::Triple::x86_64:
1574 return *(TheABIInfo = new X86_64ABIInfo());
Daniel Dunbare3532f82009-08-24 08:52:16 +00001575 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001576}