blob: 49c6f0e3a611f3c9f4dec8aaf1aae632dfb64296 [file] [log] [blame]
Daniel Dunbara8f02052008-09-08 21:33:45 +00001//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// These classes wrap the information about a call or function
11// definition used to handle ABI compliancy.
12//
13//===----------------------------------------------------------------------===//
14
15#include "CGCall.h"
16#include "CodeGenFunction.h"
Daniel Dunbar3ef2e852008-09-10 00:41:16 +000017#include "CodeGenModule.h"
Daniel Dunbarf98eeff2008-10-13 17:02:26 +000018#include "clang/Basic/TargetInfo.h"
Daniel Dunbara8f02052008-09-08 21:33:45 +000019#include "clang/AST/ASTContext.h"
20#include "clang/AST/Decl.h"
Anders Carlsson7a785352009-04-03 22:48:58 +000021#include "clang/AST/DeclCXX.h"
Daniel Dunbara8f02052008-09-08 21:33:45 +000022#include "clang/AST/DeclObjC.h"
Daniel Dunbar51a2d192009-01-29 08:13:58 +000023#include "clang/AST/RecordLayout.h"
Daniel Dunbar04d35782008-09-17 00:51:38 +000024#include "llvm/ADT/StringExtras.h"
Devang Patel98bfe502008-09-24 01:01:36 +000025#include "llvm/Attributes.h"
Daniel Dunbar90e43452009-03-02 04:32:35 +000026#include "llvm/Support/CallSite.h"
Daniel Dunbare09a9692009-01-24 08:32:22 +000027#include "llvm/Support/CommandLine.h"
Daniel Dunbar3cfcec72009-02-12 09:04:14 +000028#include "llvm/Support/MathExtras.h"
Daniel Dunbar9f4874e2009-02-04 23:24:38 +000029#include "llvm/Support/raw_ostream.h"
Daniel Dunbar708d8a82009-01-27 01:36:03 +000030#include "llvm/Target/TargetData.h"
Daniel Dunbard283e632009-02-03 01:05:53 +000031
32#include "ABIInfo.h"
33
Daniel Dunbara8f02052008-09-08 21:33:45 +000034using namespace clang;
35using namespace CodeGen;
36
37/***/
38
Daniel Dunbara8f02052008-09-08 21:33:45 +000039// FIXME: Use iterator and sidestep silly type array creation.
40
Daniel Dunbar34bda882009-02-02 23:23:47 +000041const
Douglas Gregor4fa58902009-02-26 23:50:07 +000042CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
Daniel Dunbar34bda882009-02-02 23:23:47 +000043 return getFunctionInfo(FTNP->getResultType(),
44 llvm::SmallVector<QualType, 16>());
Daniel Dunbar3ad1f072008-09-10 04:01:49 +000045}
46
Daniel Dunbar34bda882009-02-02 23:23:47 +000047const
Douglas Gregor4fa58902009-02-26 23:50:07 +000048CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
Daniel Dunbar34bda882009-02-02 23:23:47 +000049 llvm::SmallVector<QualType, 16> ArgTys;
50 // FIXME: Kill copy.
Daniel Dunbar3ad1f072008-09-10 04:01:49 +000051 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
Daniel Dunbar34bda882009-02-02 23:23:47 +000052 ArgTys.push_back(FTP->getArgType(i));
53 return getFunctionInfo(FTP->getResultType(), ArgTys);
Daniel Dunbar3ad1f072008-09-10 04:01:49 +000054}
55
Anders Carlsson7a785352009-04-03 22:48:58 +000056const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
57 llvm::SmallVector<QualType, 16> ArgTys;
58 // Add the 'this' pointer.
59 ArgTys.push_back(MD->getThisType(Context));
60
61 const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
62 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
63 ArgTys.push_back(FTP->getArgType(i));
64 return getFunctionInfo(FTP->getResultType(), ArgTys);
65}
66
Daniel Dunbar34bda882009-02-02 23:23:47 +000067const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
Anders Carlsson7a785352009-04-03 22:48:58 +000068 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
69 if (MD->isInstance())
70 return getFunctionInfo(MD);
71 }
72
Daniel Dunbara8f02052008-09-08 21:33:45 +000073 const FunctionType *FTy = FD->getType()->getAsFunctionType();
Douglas Gregor4fa58902009-02-26 23:50:07 +000074 if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
Daniel Dunbar34bda882009-02-02 23:23:47 +000075 return getFunctionInfo(FTP);
Douglas Gregor4fa58902009-02-26 23:50:07 +000076 return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
Daniel Dunbara8f02052008-09-08 21:33:45 +000077}
78
Daniel Dunbar34bda882009-02-02 23:23:47 +000079const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
80 llvm::SmallVector<QualType, 16> ArgTys;
81 ArgTys.push_back(MD->getSelfDecl()->getType());
82 ArgTys.push_back(Context.getObjCSelType());
83 // FIXME: Kill copy?
Chris Lattner9408eb12009-02-20 06:23:21 +000084 for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
Daniel Dunbara8f02052008-09-08 21:33:45 +000085 e = MD->param_end(); i != e; ++i)
Daniel Dunbar34bda882009-02-02 23:23:47 +000086 ArgTys.push_back((*i)->getType());
87 return getFunctionInfo(MD->getResultType(), ArgTys);
Daniel Dunbara8f02052008-09-08 21:33:45 +000088}
89
Daniel Dunbar34bda882009-02-02 23:23:47 +000090const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
91 const CallArgList &Args) {
92 // FIXME: Kill copy.
93 llvm::SmallVector<QualType, 16> ArgTys;
Daniel Dunbarebbb8f32009-01-31 02:19:00 +000094 for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
95 i != e; ++i)
Daniel Dunbar34bda882009-02-02 23:23:47 +000096 ArgTys.push_back(i->second);
97 return getFunctionInfo(ResTy, ArgTys);
Daniel Dunbarebbb8f32009-01-31 02:19:00 +000098}
99
Daniel Dunbar34bda882009-02-02 23:23:47 +0000100const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
101 const FunctionArgList &Args) {
102 // FIXME: Kill copy.
103 llvm::SmallVector<QualType, 16> ArgTys;
Daniel Dunbar9fc15a82009-02-02 21:43:58 +0000104 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
105 i != e; ++i)
Daniel Dunbar34bda882009-02-02 23:23:47 +0000106 ArgTys.push_back(i->second);
107 return getFunctionInfo(ResTy, ArgTys);
108}
109
110const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
111 const llvm::SmallVector<QualType, 16> &ArgTys) {
Daniel Dunbardcf19d12009-02-03 00:07:12 +0000112 // Lookup or create unique function info.
113 llvm::FoldingSetNodeID ID;
114 CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());
115
116 void *InsertPos = 0;
117 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
118 if (FI)
119 return *FI;
120
Daniel Dunbare92e0ab2009-02-03 05:31:23 +0000121 // Construct the function info.
Daniel Dunbardcf19d12009-02-03 00:07:12 +0000122 FI = new CGFunctionInfo(ResTy, ArgTys);
Daniel Dunbarb944cc92009-02-05 00:00:23 +0000123 FunctionInfos.InsertNode(FI, InsertPos);
Daniel Dunbare92e0ab2009-02-03 05:31:23 +0000124
125 // Compute ABI information.
Daniel Dunbar749e36b2009-02-03 06:51:18 +0000126 getABIInfo().computeInfo(*FI, getContext());
Daniel Dunbare92e0ab2009-02-03 05:31:23 +0000127
Daniel Dunbardcf19d12009-02-03 00:07:12 +0000128 return *FI;
Daniel Dunbar34bda882009-02-02 23:23:47 +0000129}
130
131/***/
132
Daniel Dunbarf98eeff2008-10-13 17:02:26 +0000133ABIInfo::~ABIInfo() {}
134
/// dump - Print a human-readable description of this ABIArgInfo to
/// stderr, for debugging.
void ABIArgInfo::dump() const {
  fprintf(stderr, "(ABIArgInfo Kind=");
  switch (TheKind) {
  case Direct:
    fprintf(stderr, "Direct");
    break;
  case Ignore:
    fprintf(stderr, "Ignore");
    break;
  case Coerce:
    fprintf(stderr, "Coerce Type=");
    // NOTE(review): the coerced LLVM type is printed through
    // llvm::errs() while the surrounding text goes through fprintf;
    // if the two stderr streams are buffered differently the output
    // may interleave oddly.
    getCoerceToType()->print(llvm::errs());
    break;
  case Indirect:
    fprintf(stderr, "Indirect Align=%d", getIndirectAlign());
    break;
  case Expand:
    fprintf(stderr, "Expand");
    break;
  }
  fprintf(stderr, ")\n");
}
157
158/***/
159
Daniel Dunbara7446422009-03-31 19:01:39 +0000160/// isEmptyRecord - Return true iff a structure has no non-empty
Daniel Dunbar99eebc62008-09-17 21:22:33 +0000161/// members. Note that a structure with a flexible array member is not
162/// considered empty.
Douglas Gregorc55b0b02009-04-09 21:40:53 +0000163static bool isEmptyRecord(ASTContext &Context, QualType T) {
Daniel Dunbara7446422009-03-31 19:01:39 +0000164 const RecordType *RT = T->getAsRecordType();
Daniel Dunbar99eebc62008-09-17 21:22:33 +0000165 if (!RT)
166 return 0;
167 const RecordDecl *RD = RT->getDecl();
168 if (RD->hasFlexibleArrayMember())
169 return false;
Douglas Gregorc55b0b02009-04-09 21:40:53 +0000170 for (RecordDecl::field_iterator i = RD->field_begin(Context),
171 e = RD->field_end(Context); i != e; ++i) {
Daniel Dunbar99eebc62008-09-17 21:22:33 +0000172 const FieldDecl *FD = *i;
Douglas Gregorc55b0b02009-04-09 21:40:53 +0000173 if (!isEmptyRecord(Context, FD->getType()))
Daniel Dunbar99eebc62008-09-17 21:22:33 +0000174 return false;
175 }
176 return true;
177}
178
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists;
/// 0 otherwise. (Note: despite older comments, this returns a type,
/// not a field declaration.)
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(Context),
         e = RD->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Treat single element arrays as the element
    if (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
      if (AT->getSize().getZExtValue() == 1)
        FT = AT->getElementType();

    // Ignore empty records and padding bit-fields.
    if (isEmptyRecord(Context, FT) ||
        (FD->isBitField() && !FD->getIdentifier()))
      continue;

    // A second non-empty field disqualifies the struct.
    if (Found)
      return 0;

    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
    } else {
      // Recurse: a nested single-element struct counts as its element.
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  return Found;
}
226
227static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
228 if (!Ty->getAsBuiltinType() && !Ty->isPointerType())
229 return false;
230
231 uint64_t Size = Context.getTypeSize(Ty);
232 return Size == 32 || Size == 64;
233}
234
235static bool areAllFields32Or64BitBasicType(const RecordDecl *RD,
236 ASTContext &Context) {
Douglas Gregorc55b0b02009-04-09 21:40:53 +0000237 for (RecordDecl::field_iterator i = RD->field_begin(Context),
238 e = RD->field_end(Context); i != e; ++i) {
Daniel Dunbar99eebc62008-09-17 21:22:33 +0000239 const FieldDecl *FD = *i;
240
241 if (!is32Or64BitBasicType(FD->getType(), Context))
242 return false;
243
Daniel Dunbaref495d42009-04-27 18:31:32 +0000244 // FIXME: Reject bit-fields wholesale; there are two problems, we
Daniel Dunbar9f052cb2009-03-11 22:05:26 +0000245 // don't know how to expand them yet, and the predicate for
246 // telling if a bitfield still counts as "basic" is more
247 // complicated than what we were doing previously.
248 if (FD->isBitField())
249 return false;
Daniel Dunbar99eebc62008-09-17 21:22:33 +0000250 }
Daniel Dunbar9f052cb2009-03-11 22:05:26 +0000251
Daniel Dunbar99eebc62008-09-17 21:22:33 +0000252 return true;
253}
254
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  // Classify the return type and each argument independently.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  ASTContext &Context;
  // IsDarwin selects the Darwin return-value conventions (e.g. small
  // aggregates in registers) versus the generic x86-32 behavior.
  bool IsDarwin;

  /// isRegisterSize - True if Size (in bits) matches a general
  /// purpose register width usable for returns.
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

public:
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  // Classify the return type and each argument independently.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(ASTContext &Context, bool d)
    : ABIInfo(), Context(Context), IsDarwin(d) {}
};
}
310
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, or complex type, it is ok.
  if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAsRecordType();
  if (!RT) return false;

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context),
         e = RT->getDecl()->field_end(Context); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty structures are ignored.
    if (isEmptyRecord(Context, FD->getType()))
      continue;

    // As are arrays of empty structures, but not generally, so we
    // can't add this test higher in this routine.
    if (const ConstantArrayType *AT =
        Context.getAsConstantArrayType(FD->getType()))
      if (isEmptyRecord(Context, AT->getElementType()))
        continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }

  return true;
}
367
/// classifyReturnType - Decide how a value of type \arg RetTy is
/// returned under the x86-32 ABI: ignored (void), returned directly,
/// coerced into a register-sized type, or returned indirectly via a
/// hidden sret pointer.
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAsVectorType()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwin) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty,
                                                           2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = RetTy->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // Outside of Darwin, structs and unions are always indirect.
    if (!IsDarwin && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure, padding
          // bit-fields can adjust that to be larger than the single
          // element type.
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::FloatTy);
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::DoubleTy);
        }
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as
        // the proper pointer type.
        llvm::Type *PtrTy =
          llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0), Context);
      }
    }

    uint64_t Size = Context.getTypeSize(RetTy);
    if (isRegisterSize(Size)) {
      // Always return in register for unions for now.
      // FIXME: This is wrong, but better than treating as a
      // structure.
      if (RetTy->isUnionType())
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));

      // Small structures which are register sized are generally returned
      // in a register.
      if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size));
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    // Scalars (and everything else) are returned directly.
    return ABIArgInfo::getDirect();
  }
}
458
/// classifyArgumentType - Decide how a value of type \arg Ty is
/// passed as an argument under the x86-32 ABI: directly, ignored
/// (empty struct), expanded into its scalar fields, or passed
/// indirectly by pointer.
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               ASTContext &Context) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAsStructureType())
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // Ignore empty structs.
    uint64_t Size = Context.getTypeSize(Ty);
    if (Ty->isStructureType() && Size == 0)
      return ABIArgInfo::getIgnore();

    // Expand structs with size <= 128-bits which consist only of
    // basic types (int, long long, float, double, xxx*). This is
    // non-recursive and does not ignore empty fields.
    if (const RecordType *RT = Ty->getAsStructureType()) {
      if (Context.getTypeSize(Ty) <= 4*32 &&
          areAllFields32Or64BitBasicType(RT->getDecl(), Context))
        return ABIArgInfo::getExpand();
    }

    return ABIArgInfo::getIndirect(0);
  } else {
    return ABIArgInfo::getDirect();
  }
}
487
/// EmitVAArg - Emit IR implementing va_arg for the x86-32 ABI, where
/// the va_list is a simple char* cursor into the argument area.
/// Returns the typed address of the current argument and bumps the
/// cursor past it.
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  // Load the current argument pointer out of the va_list slot.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance by the argument size rounded up to 4-byte slot alignment.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  // Store the advanced cursor back into the va_list.
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
511
namespace {
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  /// Class - The AMD64 ABI register classes used when classifying an
  /// argument or return value (AMD64-ABI 3.2.3).
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  Class merge(Class Accum, Class Field) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
                Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, chose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an
  /// enumeration of all the ways we might want to pass things,
  /// instead of constructing an LLVM type. This makes this code more
  /// explicit, and it makes it clearer that we are also doing this
  /// for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo,
                             ASTContext &Context) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  // neededInt/neededSSE report how many integer/SSE registers the
  // argument consumes, so the caller can track register exhaustion.
  ABIArgInfo classifyArgumentType(QualType Ty,
                                  ASTContext &Context,
                                  unsigned &neededInt,
                                  unsigned &neededSSE) const;

public:
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};
}
590
Daniel Dunbar64b132f2009-01-31 00:06:58 +0000591X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
592 Class Field) const {
Daniel Dunbar11dc6772009-01-30 08:09:32 +0000593 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
594 // classified recursively so that always two fields are
595 // considered. The resulting class is calculated according to
596 // the classes of the fields in the eightbyte:
597 //
598 // (a) If both classes are equal, this is the resulting class.
599 //
600 // (b) If one of the classes is NO_CLASS, the resulting class is
601 // the other class.
602 //
603 // (c) If one of the classes is MEMORY, the result is the MEMORY
604 // class.
605 //
606 // (d) If one of the classes is INTEGER, the result is the
607 // INTEGER.
608 //
609 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
610 // MEMORY is used as class.
611 //
612 // (f) Otherwise class SSE is used.
Daniel Dunbar78d7d452009-03-06 17:50:25 +0000613
614 // Accum should never be memory (we should have returned) or
615 // ComplexX87 (because this cannot be passed in a structure).
616 assert((Accum != Memory && Accum != ComplexX87) &&
Daniel Dunbar64b132f2009-01-31 00:06:58 +0000617 "Invalid accumulated classification during merge.");
618 if (Accum == Field || Field == NoClass)
619 return Accum;
620 else if (Field == Memory)
621 return Memory;
622 else if (Accum == NoClass)
623 return Field;
624 else if (Accum == Integer || Field == Integer)
625 return Integer;
626 else if (Field == X87 || Field == X87Up || Field == ComplexX87)
627 return Memory;
Daniel Dunbar11dc6772009-01-30 08:09:32 +0000628 else
Daniel Dunbar64b132f2009-01-31 00:06:58 +0000629 return SSE;
Daniel Dunbar11dc6772009-01-30 08:09:32 +0000630}
631
/// classify - Compute the x86-64 register classes (Lo eightbyte, Hi
/// eightbyte) for \arg Ty when it is located at bit offset \arg
/// OffsetBase inside an enclosing object (0 for a top-level value).
/// Implements the classification algorithm of AMD64-ABI 3.2.3.
void X86_64ABIInfo::classify(QualType Ty,
                             ASTContext &Context,
                             uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value
  // class for Class pairs with appropriate constructor methods for
  // the various situations.

  // FIXME: Some of the split computations are wrong; unaligned
  // vectors shouldn't be passed in registers for example, so there is
  // no chance they can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  // Current aliases whichever eightbyte this type starts in; it
  // defaults to Memory and is overwritten below for types that can be
  // passed in registers.
  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAsBuiltinType()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      // 128-bit integers occupy both eightbytes as INTEGER.
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      // 80-bit long double fills the first eightbyte and spills into
      // the second (X87, X87Up).
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
  } else if (const EnumType *ET = Ty->getAsEnumType()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
  } else if (Ty->hasPointerRepresentation()) {
    Current = Integer;
  } else if (const VectorType *VT = Ty->getAsVectorType()) {
    uint64_t Size = Context.getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      // Leaving Current as the default Memory classification.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128) {
      Lo = SSE;
      Hi = SSEUp;
    }
  } else if (const ComplexType *CT = Ty->getAsComplexType()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == Context.FloatTy)
      Current = SSE;
    else if (ET == Context.DoubleTy)
      Lo = Hi = SSE;
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;
  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    // Classify each element at its offset and merge into the running
    // (Lo, Hi) classification; Memory short-circuits the loop.
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    if (Hi == Memory)
      Lo = Memory;
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
  } else if (const RecordType *RT = Ty->getAsRecordType()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.
    if (Size > 128)
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(Context),
           e = RD->field_end(Context); i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Note: this Offset/Size pair shadows the outer variables and
        // is measured in bits for the bit-field itself.
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;
        FieldLo = FieldHi = NoClass;
        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceeded by SSE, it is converted to SSE.

    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == Memory)
      Lo = Memory;
    if (Hi == SSEUp && Lo != SSE)
      Hi = SSE;
  }
}
846
Daniel Dunbar87c4dc92009-02-14 02:09:24 +0000847ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
848 const llvm::Type *CoerceTo,
849 ASTContext &Context) const {
850 if (CoerceTo == llvm::Type::Int64Ty) {
851 // Integer and pointer types will end up in a general purpose
852 // register.
Daniel Dunbarb341feb2009-02-22 04:16:10 +0000853 if (Ty->isIntegralType() || Ty->isPointerType())
Daniel Dunbar87c4dc92009-02-14 02:09:24 +0000854 return ABIArgInfo::getDirect();
Daniel Dunbarb341feb2009-02-22 04:16:10 +0000855
Daniel Dunbar87c4dc92009-02-14 02:09:24 +0000856 } else if (CoerceTo == llvm::Type::DoubleTy) {
Daniel Dunbare60d5332009-02-14 02:45:45 +0000857 // FIXME: It would probably be better to make CGFunctionInfo only
858 // map using canonical types than to canonize here.
859 QualType CTy = Context.getCanonicalType(Ty);
860
Daniel Dunbar87c4dc92009-02-14 02:09:24 +0000861 // Float and double end up in a single SSE reg.
Daniel Dunbare60d5332009-02-14 02:45:45 +0000862 if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
Daniel Dunbar87c4dc92009-02-14 02:09:24 +0000863 return ABIArgInfo::getDirect();
Daniel Dunbarb341feb2009-02-22 04:16:10 +0000864
Daniel Dunbar87c4dc92009-02-14 02:09:24 +0000865 }
866
867 return ABIArgInfo::getCoerce(CoerceTo);
868}
Daniel Dunbar64b132f2009-01-31 00:06:58 +0000869
/// classifyReturnType - Map \arg RetTy onto its ABI return convention
/// following AMD64-ABI 3.2.3p4, building an LLVM result type from the
/// (Lo, Hi) eightbyte classification.
ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
                                             ASTContext &Context) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, Context, 0, Lo, Hi);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
  // hidden argument.
  case Memory:
    return ABIArgInfo::getIndirect(0);

  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
  // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = llvm::Type::Int64Ty; break;

  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
  // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = llvm::Type::DoubleTy; break;

  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
  // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::X86_FP80Ty; break;

  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
  // part of the value is returned in %st0 and the imaginary part in
  // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty,
                                    llvm::Type::X86_FP80Ty,
                                    NULL);
    break;
  }

  // Augment ResType with a second member for the high eightbyte,
  // when one is needed.
  switch (Hi) {
  // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    assert(0 && "Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass: break;

  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    break;
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;

  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
  // is passed in the upper half of the last used SSE register.
  //
  // SSEUP should always be preceeded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;

  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
  // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceeded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceeded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87)
      ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    break;
  }

  return getCoerceResult(RetTy, ResType, Context);
}
962
/// classifyArgumentType - Map \arg Ty onto its ABI argument-passing
/// convention following AMD64-ABI 3.2.3p3. On return, \arg neededInt
/// and \arg neededSSE hold the number of general purpose and SSE
/// registers, respectively, that the argument consumes (0 if it is
/// ignored or passed indirectly).
ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
                                               unsigned &neededInt,
                                               unsigned &neededSSE) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, Context, 0, Lo, Hi);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  const llvm::Type *ResType = 0;
  switch (Lo) {
  case NoClass:
    return ABIArgInfo::getIgnore();

  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
  // on the stack.
  case Memory:

  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
  // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    return ABIArgInfo::getIndirect(0);

  case SSEUp:
  case X87Up:
    assert(0 && "Invalid classification for lo word.");

  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
  // and %r9 is used.
  case Integer:
    ++neededInt;
    ResType = llvm::Type::Int64Ty;
    break;

  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
  // available SSE register is used, the registers are taken in the
  // order from %xmm0 to %xmm7.
  case SSE:
    ++neededSSE;
    ResType = llvm::Type::DoubleTy;
    break;
  }

  // Augment ResType (and the register counts) with a second member
  // for the high eightbyte, when one is needed.
  switch (Hi) {
  // Memory was handled previously, ComplexX87 and X87 should
  // never occur as hi classes, and X87Up must be preceed by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    assert(0 && "Invalid classification for hi word.");
    break;

  case NoClass: break;
  case Integer:
    ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL);
    ++neededInt;
    break;

  // X87Up generally doesn't occur here (long double is passed in
  // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL);
    ++neededSSE;
    break;

  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
  // eightbyte is passed in the upper half of the last used SSE
  // register.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2);
    break;
  }

  return getCoerceResult(Ty, ResType, Context);
}
1048
1049void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
1050 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
1051
1052 // Keep track of the number of assigned registers.
1053 unsigned freeIntRegs = 6, freeSSERegs = 8;
1054
1055 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1056 // get assigned (in left-to-right order) for passing as follows...
1057 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
Daniel Dunbare978cb92009-02-10 17:06:09 +00001058 it != ie; ++it) {
1059 unsigned neededInt, neededSSE;
1060 it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE);
1061
1062 // AMD64-ABI 3.2.3p3: If there are no registers available for any
1063 // eightbyte of an argument, the whole argument is passed on the
1064 // stack. If registers have already been assigned for some
1065 // eightbytes of such an argument, the assignments get reverted.
1066 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
1067 freeIntRegs -= neededInt;
1068 freeSSERegs -= neededSSE;
1069 } else {
Daniel Dunbard0536ac2009-02-22 08:17:51 +00001070 it->info = ABIArgInfo::getIndirect(0);
Daniel Dunbare978cb92009-02-10 17:06:09 +00001071 }
1072 }
Daniel Dunbarb6d5c442009-01-15 18:18:40 +00001073}
1074
/// EmitVAArgFromMemory - Emit IR fetching a va_arg value of type \arg
/// Ty from the overflow_arg_area of the x86-64 va_list at \arg
/// VAListAddr, advancing the area pointer past the value
/// (AMD64-ABI 3.5.7p5, steps 7-11). Returns the address of the value.
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // Note that we follow the ABI & gcc here, even though the type
    // could in theory have an alignment greater than 16. This case
    // shouldn't ever matter in practice.

    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    llvm::Type::Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  // Advance by the type's size rounded up to an 8 byte multiple.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                               (SizeInBytes + 7)  & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
1124
/// EmitVAArg - Emit the x86-64 va_arg lowering for a value of type
/// \arg Ty from the va_list at \arg VAListAddr, implementing
/// AMD64-ABI 3.5.7p5: try the register save area first, falling back
/// to the overflow area. Returns the address of the fetched value.
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;
  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(),
                                       neededInt, neededSSE);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space).

  llvm::Value *InRegs = 0;
  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs =
      CGF.Builder.CreateICmpULE(gp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       48 - neededInt * 8),
                                "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      CGF.Builder.CreateICmpULE(fp_offset,
                                llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                       176 - neededSSE * 16),
                                "fits_in_fp");
    // Both register classes must fit when the value is split across them.
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up
  // needing to collect arguments from different places; often what
  // should result in a simple assembling of a structure from
  // scattered addresses has many more loads than necessary. Can we
  // clean this up?
  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
  if (neededInt && neededSSE) {
    // The value is split between a GPR and an SSE register: load each
    // eightbyte from its register save slot and reassemble the pair
    // in a temporary, then hand back the temporary's address.
    // FIXME: Cleanup.
    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    const llvm::Type *TyLo = ST->getElementType(0);
    const llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
           "Unexpected ABI info for mixed regs");
    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    if (neededSSE == 1) {
      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                          llvm::PointerType::getUnqual(LTy));
    } else {
      assert(neededSSE == 2 && "Invalid number of needed registers!");
      // SSE registers are spaced 16 bytes apart in the register save
      // area, we need to collect the two eightbytes together.
      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
      llvm::Value *RegAddrHi =
        CGF.Builder.CreateGEP(RegAddrLo,
                              llvm::ConstantInt::get(llvm::Type::Int32Ty, 16));
      const llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(llvm::Type::DoubleTy);
      const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy,
                                                         llvm::Type::DoubleTy,
                                                         NULL);
      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                           DblPtrTy));
      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
      RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                          llvm::PointerType::getUnqual(LTy));
    }
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty,
                                                 neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
                                                 "vaarg.addr");
  ResAddr->reserveOperandSpace(2);
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  return ResAddr;
}
1292
// PIC16ABIInfo - ABI implementation for the PIC16 target: return
// types and arguments are classified by the helpers below; va_arg is
// not implemented (see EmitVAArg).
class PIC16ABIInfo : public ABIInfo {
  ABIArgInfo classifyReturnType(QualType RetTy,
                                ASTContext &Context) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  ASTContext &Context) const;

  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
    // Classify the return type, then each argument in turn.
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type, Context);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

};
1312
1313ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
1314 ASTContext &Context) const {
1315 if (RetTy->isVoidType()) {
1316 return ABIArgInfo::getIgnore();
1317 } else {
1318 return ABIArgInfo::getDirect();
1319 }
1320}
1321
1322ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
1323 ASTContext &Context) const {
1324 return ABIArgInfo::getDirect();
1325}
1326
/// EmitVAArg - va_arg lowering is not implemented for PIC16; this
/// stub returns a null value.
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  // FIXME: Implement va_arg for PIC16 instead of returning null.
  return 0;
}
1331
Eli Friedmanac90d8e2009-03-29 00:15:25 +00001332class ARMABIInfo : public ABIInfo {
1333 ABIArgInfo classifyReturnType(QualType RetTy,
1334 ASTContext &Context) const;
1335
1336 ABIArgInfo classifyArgumentType(QualType RetTy,
1337 ASTContext &Context) const;
1338
1339 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const;
1340
1341 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1342 CodeGenFunction &CGF) const;
1343};
1344
1345void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const {
1346 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context);
1347 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1348 it != ie; ++it) {
1349 it->info = classifyArgumentType(it->type, Context);
1350 }
1351}
1352
1353ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
1354 ASTContext &Context) const {
1355 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1356 return ABIArgInfo::getDirect();
1357 }
1358 // FIXME: This is kind of nasty... but there isn't much choice
1359 // because the ARM backend doesn't support byval.
1360 // FIXME: This doesn't handle alignment > 64 bits.
1361 const llvm::Type* ElemTy;
1362 unsigned SizeRegs;
1363 if (Context.getTypeAlign(Ty) > 32) {
1364 ElemTy = llvm::Type::Int64Ty;
1365 SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
1366 } else {
1367 ElemTy = llvm::Type::Int32Ty;
1368 SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
1369 }
1370 std::vector<const llvm::Type*> LLVMFields;
1371 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
1372 const llvm::Type* STy = llvm::StructType::get(LLVMFields, true);
1373 return ABIArgInfo::getCoerce(STy);
1374}
1375
1376ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
1377 ASTContext &Context) const {
1378 if (RetTy->isVoidType()) {
1379 return ABIArgInfo::getIgnore();
1380 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1381 // Aggregates <= 4 bytes are returned in r0; other aggregates
1382 // are returned indirectly.
1383 uint64_t Size = Context.getTypeSize(RetTy);
1384 if (Size <= 32)
1385 return ABIArgInfo::getCoerce(llvm::Type::Int32Ty);
1386 return ABIArgInfo::getIndirect(0);
1387 } else {
1388 return ABIArgInfo::getDirect();
1389 }
1390}
1391
// Lower va_arg on ARM: the va_list is a simple pointer that is bumped past
// each argument. Returns the (typed) address of the current argument and
// advances the list by the argument size rounded up to 4 bytes.
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  // Treat the va_list as an i8** so we can load and store the cursor.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  // Advance the cursor by the argument size, rounded up to the 4-byte
  // slot granularity.
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr,
                      llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
1416
Daniel Dunbarf98eeff2008-10-13 17:02:26 +00001417ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
Daniel Dunbar7fbcf9c2009-02-10 20:44:09 +00001418 ASTContext &Context) const {
Daniel Dunbareec02622009-02-03 06:30:17 +00001419 if (RetTy->isVoidType()) {
1420 return ABIArgInfo::getIgnore();
1421 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001422 return ABIArgInfo::getIndirect(0);
Daniel Dunbareec02622009-02-03 06:30:17 +00001423 } else {
1424 return ABIArgInfo::getDirect();
1425 }
Daniel Dunbarf98eeff2008-10-13 17:02:26 +00001426}
1427
1428ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
Daniel Dunbar7fbcf9c2009-02-10 20:44:09 +00001429 ASTContext &Context) const {
Daniel Dunbareec02622009-02-03 06:30:17 +00001430 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001431 return ABIArgInfo::getIndirect(0);
Daniel Dunbareec02622009-02-03 06:30:17 +00001432 } else {
1433 return ABIArgInfo::getDirect();
1434 }
Daniel Dunbarf98eeff2008-10-13 17:02:26 +00001435}
1436
Daniel Dunbar7fbcf9c2009-02-10 20:44:09 +00001437llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1438 CodeGenFunction &CGF) const {
1439 return 0;
1440}
1441
Daniel Dunbarf98eeff2008-10-13 17:02:26 +00001442const ABIInfo &CodeGenTypes::getABIInfo() const {
1443 if (TheABIInfo)
1444 return *TheABIInfo;
1445
1446 // For now we just cache this in the CodeGenTypes and don't bother
1447 // to free it.
1448 const char *TargetPrefix = getContext().Target.getTargetPrefix();
1449 if (strcmp(TargetPrefix, "x86") == 0) {
Eli Friedman5e175802009-03-23 23:26:24 +00001450 bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin");
Daniel Dunbarb6d5c442009-01-15 18:18:40 +00001451 switch (getContext().Target.getPointerWidth(0)) {
1452 case 32:
Douglas Gregorc55b0b02009-04-09 21:40:53 +00001453 return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin));
Daniel Dunbarb6d5c442009-01-15 18:18:40 +00001454 case 64:
Daniel Dunbar56555952009-01-30 18:47:53 +00001455 return *(TheABIInfo = new X86_64ABIInfo());
Daniel Dunbarb6d5c442009-01-15 18:18:40 +00001456 }
Eli Friedmanac90d8e2009-03-29 00:15:25 +00001457 } else if (strcmp(TargetPrefix, "arm") == 0) {
1458 // FIXME: Support for OABI?
1459 return *(TheABIInfo = new ARMABIInfo());
Sanjiv Gupta88b4e512009-04-21 06:01:16 +00001460 } else if (strcmp(TargetPrefix, "pic16") == 0) {
1461 return *(TheABIInfo = new PIC16ABIInfo());
Daniel Dunbarf98eeff2008-10-13 17:02:26 +00001462 }
1463
1464 return *(TheABIInfo = new DefaultABIInfo);
1465}
1466
Daniel Dunbare126ab12008-09-10 02:41:04 +00001467/***/
1468
Daniel Dunbare92e0ab2009-02-03 05:31:23 +00001469CGFunctionInfo::CGFunctionInfo(QualType ResTy,
1470 const llvm::SmallVector<QualType, 16> &ArgTys) {
1471 NumArgs = ArgTys.size();
1472 Args = new ArgInfo[1 + NumArgs];
1473 Args[0].type = ResTy;
1474 for (unsigned i = 0; i < NumArgs; ++i)
1475 Args[1 + i].type = ArgTys[i];
1476}
1477
1478/***/
1479
Daniel Dunbar04d35782008-09-17 00:51:38 +00001480void CodeGenTypes::GetExpandedTypes(QualType Ty,
1481 std::vector<const llvm::Type*> &ArgTys) {
1482 const RecordType *RT = Ty->getAsStructureType();
1483 assert(RT && "Can only expand structure types.");
1484 const RecordDecl *RD = RT->getDecl();
1485 assert(!RD->hasFlexibleArrayMember() &&
1486 "Cannot expand structure with flexible array.");
1487
Douglas Gregorc55b0b02009-04-09 21:40:53 +00001488 for (RecordDecl::field_iterator i = RD->field_begin(Context),
1489 e = RD->field_end(Context); i != e; ++i) {
Daniel Dunbar04d35782008-09-17 00:51:38 +00001490 const FieldDecl *FD = *i;
1491 assert(!FD->isBitField() &&
1492 "Cannot expand structure with bit-field members.");
1493
1494 QualType FT = FD->getType();
1495 if (CodeGenFunction::hasAggregateLLVMType(FT)) {
1496 GetExpandedTypes(FT, ArgTys);
1497 } else {
1498 ArgTys.push_back(ConvertType(FT));
1499 }
1500 }
1501}
1502
// Reassemble a struct parameter that was expanded into multiple scalar
// LLVM arguments: each field is filled from successive arguments starting
// at AI, recursing into nested aggregates. Returns the iterator just past
// the last argument consumed.
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    // Note: this LValue intentionally shadows the parameter; it addresses
    // the current field within the destination struct.
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      // Scalar field: consume exactly one LLVM argument.
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}
1530
// Inverse of ExpandTypeFromArgs: flatten the struct rvalue RV into a list
// of scalar call arguments, appending each leaf field's loaded value to
// Args in field order.
void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(getContext()),
         e = RD->field_end(getContext()); i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, false, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      // Nested aggregate: recurse with an rvalue for the field's storage.
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}
1557
Daniel Dunbar84379912009-02-02 19:06:38 +00001558/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1559/// a pointer to an object of type \arg Ty.
1560///
1561/// This safely handles the case when the src type is smaller than the
1562/// destination type; in this situation the values of bits which not
1563/// present in the src are undefined.
1564static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
1565 const llvm::Type *Ty,
1566 CodeGenFunction &CGF) {
1567 const llvm::Type *SrcTy =
1568 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
1569 uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
1570 uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(Ty);
1571
Daniel Dunbar77071992009-02-03 05:59:18 +00001572 // If load is legal, just bitcast the src pointer.
Daniel Dunbar84379912009-02-02 19:06:38 +00001573 if (SrcSize == DstSize) {
1574 llvm::Value *Casted =
1575 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
Daniel Dunbar3f062382009-02-07 02:46:03 +00001576 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
1577 // FIXME: Use better alignment / avoid requiring aligned load.
1578 Load->setAlignment(1);
1579 return Load;
Daniel Dunbar84379912009-02-02 19:06:38 +00001580 } else {
1581 assert(SrcSize < DstSize && "Coercion is losing source bits!");
1582
1583 // Otherwise do coercion through memory. This is stupid, but
1584 // simple.
1585 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
1586 llvm::Value *Casted =
1587 CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
Daniel Dunbar3f062382009-02-07 02:46:03 +00001588 llvm::StoreInst *Store =
1589 CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
1590 // FIXME: Use better alignment / avoid requiring aligned store.
1591 Store->setAlignment(1);
Daniel Dunbar84379912009-02-02 19:06:38 +00001592 return CGF.Builder.CreateLoad(Tmp);
1593 }
1594}
1595
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypePaddedSize(SrcTy);
  uint64_t DstSize = CGF.CGM.getTargetData().getTypePaddedSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize == DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
  } else {
    assert(SrcSize > DstSize && "Coercion is missing bits!");

    // Otherwise do coercion through memory. This is stupid, but
    // simple. Spill the full source to a temp, then reload only the
    // destination-sized low part; the source's upper bits are dropped.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr);
  }
}
1632
Daniel Dunbar04d35782008-09-17 00:51:38 +00001633/***/
1634
Daniel Dunbar6ee022b2009-02-02 22:03:45 +00001635bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001636 return FI.getReturnInfo().isIndirect();
Daniel Dunbar9fc15a82009-02-02 21:43:58 +00001637}
1638
Daniel Dunbar3ad1f072008-09-10 04:01:49 +00001639const llvm::FunctionType *
Daniel Dunbar9fc15a82009-02-02 21:43:58 +00001640CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
Daniel Dunbar3ad1f072008-09-10 04:01:49 +00001641 std::vector<const llvm::Type*> ArgTys;
1642
1643 const llvm::Type *ResultType = 0;
1644
Daniel Dunbar0b37ca82009-02-02 23:43:58 +00001645 QualType RetTy = FI.getReturnType();
Daniel Dunbar77071992009-02-03 05:59:18 +00001646 const ABIArgInfo &RetAI = FI.getReturnInfo();
Daniel Dunbar22e30052008-09-11 01:48:57 +00001647 switch (RetAI.getKind()) {
Daniel Dunbar22e30052008-09-11 01:48:57 +00001648 case ABIArgInfo::Expand:
1649 assert(0 && "Invalid ABI kind for return argument");
1650
Daniel Dunbarb1a60c02009-02-03 06:17:37 +00001651 case ABIArgInfo::Direct:
1652 ResultType = ConvertType(RetTy);
1653 break;
1654
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001655 case ABIArgInfo::Indirect: {
1656 assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
Daniel Dunbar3ad1f072008-09-10 04:01:49 +00001657 ResultType = llvm::Type::VoidTy;
Daniel Dunbara9976a22008-09-10 07:00:50 +00001658 const llvm::Type *STy = ConvertType(RetTy);
Daniel Dunbar3ad1f072008-09-10 04:01:49 +00001659 ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
1660 break;
1661 }
1662
Daniel Dunbar1358b202009-01-26 21:26:08 +00001663 case ABIArgInfo::Ignore:
1664 ResultType = llvm::Type::VoidTy;
1665 break;
1666
Daniel Dunbar3ad1f072008-09-10 04:01:49 +00001667 case ABIArgInfo::Coerce:
Daniel Dunbar73d66602008-09-10 07:04:09 +00001668 ResultType = RetAI.getCoerceToType();
Daniel Dunbar3ad1f072008-09-10 04:01:49 +00001669 break;
1670 }
1671
Daniel Dunbare92e0ab2009-02-03 05:31:23 +00001672 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1673 ie = FI.arg_end(); it != ie; ++it) {
1674 const ABIArgInfo &AI = it->info;
Daniel Dunbar22e30052008-09-11 01:48:57 +00001675
1676 switch (AI.getKind()) {
Daniel Dunbar1358b202009-01-26 21:26:08 +00001677 case ABIArgInfo::Ignore:
1678 break;
1679
Daniel Dunbar04d35782008-09-17 00:51:38 +00001680 case ABIArgInfo::Coerce:
Daniel Dunbar33fa5812009-02-03 19:12:28 +00001681 ArgTys.push_back(AI.getCoerceToType());
1682 break;
1683
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001684 case ABIArgInfo::Indirect: {
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001685 // indirect arguments are always on the stack, which is addr space #0.
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001686 const llvm::Type *LTy = ConvertTypeForMem(it->type);
1687 ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
Daniel Dunbar22e30052008-09-11 01:48:57 +00001688 break;
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001689 }
Daniel Dunbar22e30052008-09-11 01:48:57 +00001690
Daniel Dunbarb1a60c02009-02-03 06:17:37 +00001691 case ABIArgInfo::Direct:
Daniel Dunbar6f56e452009-02-05 09:16:39 +00001692 ArgTys.push_back(ConvertType(it->type));
Daniel Dunbar22e30052008-09-11 01:48:57 +00001693 break;
Daniel Dunbar22e30052008-09-11 01:48:57 +00001694
1695 case ABIArgInfo::Expand:
Daniel Dunbare92e0ab2009-02-03 05:31:23 +00001696 GetExpandedTypes(it->type, ArgTys);
Daniel Dunbar22e30052008-09-11 01:48:57 +00001697 break;
1698 }
Daniel Dunbar3ad1f072008-09-10 04:01:49 +00001699 }
1700
Daniel Dunbar9fc15a82009-02-02 21:43:58 +00001701 return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
Daniel Dunbar49f5a0d2008-09-09 23:48:28 +00001702}
1703
Daniel Dunbar0b37ca82009-02-02 23:43:58 +00001704void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Daniel Dunbar6ee022b2009-02-02 22:03:45 +00001705 const Decl *TargetDecl,
Devang Patela85a9ef2008-09-25 21:02:23 +00001706 AttributeListType &PAL) {
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001707 unsigned FuncAttrs = 0;
Devang Patel2bb6eb82008-09-26 22:53:57 +00001708 unsigned RetAttrs = 0;
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001709
Anton Korobeynikov2431e602009-04-04 00:49:24 +00001710 // FIXME: handle sseregparm someday...
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001711 if (TargetDecl) {
Daniel Dunbar78582862009-04-13 21:08:27 +00001712 if (TargetDecl->hasAttr<NoThrowAttr>())
Devang Patela85a9ef2008-09-25 21:02:23 +00001713 FuncAttrs |= llvm::Attribute::NoUnwind;
Daniel Dunbar78582862009-04-13 21:08:27 +00001714 if (TargetDecl->hasAttr<NoReturnAttr>())
Devang Patela85a9ef2008-09-25 21:02:23 +00001715 FuncAttrs |= llvm::Attribute::NoReturn;
Daniel Dunbar78582862009-04-13 21:08:27 +00001716 if (TargetDecl->hasAttr<ConstAttr>())
Anders Carlssondd6791c2008-10-05 23:32:53 +00001717 FuncAttrs |= llvm::Attribute::ReadNone;
Daniel Dunbar78582862009-04-13 21:08:27 +00001718 else if (TargetDecl->hasAttr<PureAttr>())
Daniel Dunbar521c3a32009-04-10 22:14:52 +00001719 FuncAttrs |= llvm::Attribute::ReadOnly;
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001720 }
1721
Daniel Dunbar0b37ca82009-02-02 23:43:58 +00001722 QualType RetTy = FI.getReturnType();
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001723 unsigned Index = 1;
Daniel Dunbar77071992009-02-03 05:59:18 +00001724 const ABIArgInfo &RetAI = FI.getReturnInfo();
Daniel Dunbar3ad1f072008-09-10 04:01:49 +00001725 switch (RetAI.getKind()) {
Daniel Dunbarb1a60c02009-02-03 06:17:37 +00001726 case ABIArgInfo::Direct:
Daniel Dunbare126ab12008-09-10 02:41:04 +00001727 if (RetTy->isPromotableIntegerType()) {
1728 if (RetTy->isSignedIntegerType()) {
Devang Patel2bb6eb82008-09-26 22:53:57 +00001729 RetAttrs |= llvm::Attribute::SExt;
Daniel Dunbare126ab12008-09-10 02:41:04 +00001730 } else if (RetTy->isUnsignedIntegerType()) {
Devang Patel2bb6eb82008-09-26 22:53:57 +00001731 RetAttrs |= llvm::Attribute::ZExt;
Daniel Dunbare126ab12008-09-10 02:41:04 +00001732 }
1733 }
1734 break;
1735
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001736 case ABIArgInfo::Indirect:
Devang Patela85a9ef2008-09-25 21:02:23 +00001737 PAL.push_back(llvm::AttributeWithIndex::get(Index,
Daniel Dunbarebbb8f32009-01-31 02:19:00 +00001738 llvm::Attribute::StructRet |
1739 llvm::Attribute::NoAlias));
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001740 ++Index;
Daniel Dunbar39ea2c12009-03-18 19:51:01 +00001741 // sret disables readnone and readonly
1742 FuncAttrs &= ~(llvm::Attribute::ReadOnly |
1743 llvm::Attribute::ReadNone);
Daniel Dunbare126ab12008-09-10 02:41:04 +00001744 break;
1745
Daniel Dunbar1358b202009-01-26 21:26:08 +00001746 case ABIArgInfo::Ignore:
Daniel Dunbare126ab12008-09-10 02:41:04 +00001747 case ABIArgInfo::Coerce:
Daniel Dunbare126ab12008-09-10 02:41:04 +00001748 break;
Daniel Dunbar22e30052008-09-11 01:48:57 +00001749
Daniel Dunbar22e30052008-09-11 01:48:57 +00001750 case ABIArgInfo::Expand:
1751 assert(0 && "Invalid ABI kind for return argument");
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001752 }
Daniel Dunbare126ab12008-09-10 02:41:04 +00001753
Devang Patel2bb6eb82008-09-26 22:53:57 +00001754 if (RetAttrs)
1755 PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
Anton Korobeynikov2431e602009-04-04 00:49:24 +00001756
1757 // FIXME: we need to honour command line settings also...
1758 // FIXME: RegParm should be reduced in case of nested functions and/or global
1759 // register variable.
1760 signed RegParm = 0;
1761 if (TargetDecl)
1762 if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>())
1763 RegParm = RegParmAttr->getNumParams();
1764
1765 unsigned PointerWidth = getContext().Target.getPointerWidth(0);
Daniel Dunbare92e0ab2009-02-03 05:31:23 +00001766 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1767 ie = FI.arg_end(); it != ie; ++it) {
1768 QualType ParamType = it->type;
1769 const ABIArgInfo &AI = it->info;
Devang Patela85a9ef2008-09-25 21:02:23 +00001770 unsigned Attributes = 0;
Anton Korobeynikov2431e602009-04-04 00:49:24 +00001771
Daniel Dunbar22e30052008-09-11 01:48:57 +00001772 switch (AI.getKind()) {
Daniel Dunbar33fa5812009-02-03 19:12:28 +00001773 case ABIArgInfo::Coerce:
1774 break;
1775
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001776 case ABIArgInfo::Indirect:
Devang Patela85a9ef2008-09-25 21:02:23 +00001777 Attributes |= llvm::Attribute::ByVal;
Anton Korobeynikov2431e602009-04-04 00:49:24 +00001778 Attributes |=
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001779 llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
Daniel Dunbar39ea2c12009-03-18 19:51:01 +00001780 // byval disables readnone and readonly.
1781 FuncAttrs &= ~(llvm::Attribute::ReadOnly |
1782 llvm::Attribute::ReadNone);
Daniel Dunbar22e30052008-09-11 01:48:57 +00001783 break;
1784
Daniel Dunbarb1a60c02009-02-03 06:17:37 +00001785 case ABIArgInfo::Direct:
Daniel Dunbar22e30052008-09-11 01:48:57 +00001786 if (ParamType->isPromotableIntegerType()) {
1787 if (ParamType->isSignedIntegerType()) {
Devang Patela85a9ef2008-09-25 21:02:23 +00001788 Attributes |= llvm::Attribute::SExt;
Daniel Dunbar22e30052008-09-11 01:48:57 +00001789 } else if (ParamType->isUnsignedIntegerType()) {
Devang Patela85a9ef2008-09-25 21:02:23 +00001790 Attributes |= llvm::Attribute::ZExt;
Daniel Dunbar22e30052008-09-11 01:48:57 +00001791 }
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001792 }
Anton Korobeynikov2431e602009-04-04 00:49:24 +00001793 if (RegParm > 0 &&
1794 (ParamType->isIntegerType() || ParamType->isPointerType())) {
1795 RegParm -=
1796 (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
1797 if (RegParm >= 0)
1798 Attributes |= llvm::Attribute::InReg;
1799 }
1800 // FIXME: handle sseregparm someday...
Daniel Dunbar22e30052008-09-11 01:48:57 +00001801 break;
Anton Korobeynikov2431e602009-04-04 00:49:24 +00001802
Daniel Dunbar1358b202009-01-26 21:26:08 +00001803 case ABIArgInfo::Ignore:
1804 // Skip increment, no matching LLVM parameter.
1805 continue;
1806
Daniel Dunbar04d35782008-09-17 00:51:38 +00001807 case ABIArgInfo::Expand: {
1808 std::vector<const llvm::Type*> Tys;
1809 // FIXME: This is rather inefficient. Do we ever actually need
1810 // to do anything here? The result should be just reconstructed
1811 // on the other side, so extension should be a non-issue.
1812 getTypes().GetExpandedTypes(ParamType, Tys);
1813 Index += Tys.size();
1814 continue;
1815 }
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001816 }
Daniel Dunbar22e30052008-09-11 01:48:57 +00001817
Devang Patela85a9ef2008-09-25 21:02:23 +00001818 if (Attributes)
1819 PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
Daniel Dunbar04d35782008-09-17 00:51:38 +00001820 ++Index;
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001821 }
Devang Patel2bb6eb82008-09-26 22:53:57 +00001822 if (FuncAttrs)
1823 PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
Daniel Dunbarbccb0682008-09-10 00:32:18 +00001824}
1825
Daniel Dunbar6ee022b2009-02-02 22:03:45 +00001826void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1827 llvm::Function *Fn,
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001828 const FunctionArgList &Args) {
Daniel Dunbar5b7ac652009-02-03 06:02:10 +00001829 // FIXME: We no longer need the types from FunctionArgList; lift up
1830 // and simplify.
1831
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001832 // Emit allocs for param decls. Give the LLVM Argument nodes names.
1833 llvm::Function::arg_iterator AI = Fn->arg_begin();
1834
1835 // Name the struct return argument.
Daniel Dunbar6ee022b2009-02-02 22:03:45 +00001836 if (CGM.ReturnTypeUsesSret(FI)) {
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001837 AI->setName("agg.result");
1838 ++AI;
1839 }
Daniel Dunbar77071992009-02-03 05:59:18 +00001840
Daniel Dunbar14c884a2009-02-04 21:17:21 +00001841 assert(FI.arg_size() == Args.size() &&
1842 "Mismatch between function signature & arguments.");
Daniel Dunbar77071992009-02-03 05:59:18 +00001843 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001844 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
Daniel Dunbar77071992009-02-03 05:59:18 +00001845 i != e; ++i, ++info_it) {
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001846 const VarDecl *Arg = i->first;
Daniel Dunbar77071992009-02-03 05:59:18 +00001847 QualType Ty = info_it->type;
1848 const ABIArgInfo &ArgI = info_it->info;
Daniel Dunbar22e30052008-09-11 01:48:57 +00001849
1850 switch (ArgI.getKind()) {
Daniel Dunbar6f56e452009-02-05 09:16:39 +00001851 case ABIArgInfo::Indirect: {
1852 llvm::Value* V = AI;
1853 if (hasAggregateLLVMType(Ty)) {
1854 // Do nothing, aggregates and complex variables are accessed by
1855 // reference.
1856 } else {
1857 // Load scalar value from indirect argument.
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001858 V = EmitLoadOfScalar(V, false, Ty);
Daniel Dunbar6f56e452009-02-05 09:16:39 +00001859 if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
1860 // This must be a promotion, for something like
1861 // "void a(x) short x; {..."
1862 V = EmitScalarConversion(V, Ty, Arg->getType());
1863 }
1864 }
1865 EmitParmDecl(*Arg, V);
1866 break;
1867 }
1868
Daniel Dunbarb1a60c02009-02-03 06:17:37 +00001869 case ABIArgInfo::Direct: {
Daniel Dunbar22e30052008-09-11 01:48:57 +00001870 assert(AI != Fn->arg_end() && "Argument mismatch!");
1871 llvm::Value* V = AI;
Daniel Dunbarcc811502009-02-05 11:13:54 +00001872 if (hasAggregateLLVMType(Ty)) {
1873 // Create a temporary alloca to hold the argument; the rest of
1874 // codegen expects to access aggregates & complex values by
1875 // reference.
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001876 V = CreateTempAlloca(ConvertTypeForMem(Ty));
Daniel Dunbarcc811502009-02-05 11:13:54 +00001877 Builder.CreateStore(AI, V);
1878 } else {
1879 if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
1880 // This must be a promotion, for something like
1881 // "void a(x) short x; {..."
1882 V = EmitScalarConversion(V, Ty, Arg->getType());
1883 }
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001884 }
Daniel Dunbar22e30052008-09-11 01:48:57 +00001885 EmitParmDecl(*Arg, V);
1886 break;
1887 }
Daniel Dunbar04d35782008-09-17 00:51:38 +00001888
1889 case ABIArgInfo::Expand: {
Daniel Dunbar77071992009-02-03 05:59:18 +00001890 // If this structure was expanded into multiple arguments then
Daniel Dunbar04d35782008-09-17 00:51:38 +00001891 // we need to create a temporary and reconstruct it from the
1892 // arguments.
Chris Lattner6c5ec622008-11-24 04:00:27 +00001893 std::string Name = Arg->getNameAsString();
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001894 llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
Daniel Dunbar04d35782008-09-17 00:51:38 +00001895 (Name + ".addr").c_str());
1896 // FIXME: What are the right qualifiers here?
1897 llvm::Function::arg_iterator End =
1898 ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
1899 EmitParmDecl(*Arg, Temp);
Daniel Dunbar22e30052008-09-11 01:48:57 +00001900
Daniel Dunbar04d35782008-09-17 00:51:38 +00001901 // Name the arguments used in expansion and increment AI.
1902 unsigned Index = 0;
1903 for (; AI != End; ++AI, ++Index)
1904 AI->setName(Name + "." + llvm::utostr(Index));
1905 continue;
1906 }
Daniel Dunbar1358b202009-01-26 21:26:08 +00001907
1908 case ABIArgInfo::Ignore:
Daniel Dunbar94b4fec2009-02-10 00:06:49 +00001909 // Initialize the local variable appropriately.
1910 if (hasAggregateLLVMType(Ty)) {
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001911 EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
Daniel Dunbar94b4fec2009-02-10 00:06:49 +00001912 } else {
1913 EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
1914 }
1915
Daniel Dunbar015bc8e2009-02-03 20:00:13 +00001916 // Skip increment, no matching LLVM parameter.
1917 continue;
Daniel Dunbar1358b202009-01-26 21:26:08 +00001918
Daniel Dunbar33fa5812009-02-03 19:12:28 +00001919 case ABIArgInfo::Coerce: {
1920 assert(AI != Fn->arg_end() && "Argument mismatch!");
1921 // FIXME: This is very wasteful; EmitParmDecl is just going to
1922 // drop the result in a new alloca anyway, so we could just
1923 // store into that directly if we broke the abstraction down
1924 // more.
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001925 llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
Daniel Dunbar33fa5812009-02-03 19:12:28 +00001926 CreateCoercedStore(AI, V, *this);
1927 // Match to what EmitParmDecl is expecting for this type.
Daniel Dunbar99473cd2009-02-04 07:22:24 +00001928 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001929 V = EmitLoadOfScalar(V, false, Ty);
Daniel Dunbar99473cd2009-02-04 07:22:24 +00001930 if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
1931 // This must be a promotion, for something like
1932 // "void a(x) short x; {..."
1933 V = EmitScalarConversion(V, Ty, Arg->getType());
1934 }
1935 }
Daniel Dunbar33fa5812009-02-03 19:12:28 +00001936 EmitParmDecl(*Arg, V);
1937 break;
1938 }
Daniel Dunbar22e30052008-09-11 01:48:57 +00001939 }
Daniel Dunbar04d35782008-09-17 00:51:38 +00001940
1941 ++AI;
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001942 }
1943 assert(AI == Fn->arg_end() && "Argument mismatch!");
1944}
1945
Daniel Dunbar6ee022b2009-02-02 22:03:45 +00001946void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001947 llvm::Value *ReturnValue) {
Daniel Dunbare126ab12008-09-10 02:41:04 +00001948 llvm::Value *RV = 0;
1949
1950 // Functions with no result always return void.
1951 if (ReturnValue) {
Daniel Dunbar6ee022b2009-02-02 22:03:45 +00001952 QualType RetTy = FI.getReturnType();
Daniel Dunbar77071992009-02-03 05:59:18 +00001953 const ABIArgInfo &RetAI = FI.getReturnInfo();
Daniel Dunbare126ab12008-09-10 02:41:04 +00001954
1955 switch (RetAI.getKind()) {
Daniel Dunbar88dde9b2009-02-05 08:00:50 +00001956 case ABIArgInfo::Indirect:
Daniel Dunbar17d35372008-12-18 04:52:14 +00001957 if (RetTy->isAnyComplexType()) {
Daniel Dunbar17d35372008-12-18 04:52:14 +00001958 ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
1959 StoreComplexToAddr(RT, CurFn->arg_begin(), false);
1960 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
1961 EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
1962 } else {
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001963 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
1964 false);
Daniel Dunbar17d35372008-12-18 04:52:14 +00001965 }
Daniel Dunbare126ab12008-09-10 02:41:04 +00001966 break;
Daniel Dunbar22e30052008-09-11 01:48:57 +00001967
Daniel Dunbarb1a60c02009-02-03 06:17:37 +00001968 case ABIArgInfo::Direct:
Daniel Dunbarcc811502009-02-05 11:13:54 +00001969 // The internal return value temp always will have
1970 // pointer-to-return-type type.
Daniel Dunbare126ab12008-09-10 02:41:04 +00001971 RV = Builder.CreateLoad(ReturnValue);
1972 break;
1973
Daniel Dunbar1358b202009-01-26 21:26:08 +00001974 case ABIArgInfo::Ignore:
1975 break;
1976
Daniel Dunbar8559b5d2009-02-10 01:51:39 +00001977 case ABIArgInfo::Coerce:
Daniel Dunbar708d8a82009-01-27 01:36:03 +00001978 RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
Daniel Dunbar22e30052008-09-11 01:48:57 +00001979 break;
Daniel Dunbar22e30052008-09-11 01:48:57 +00001980
Daniel Dunbar22e30052008-09-11 01:48:57 +00001981 case ABIArgInfo::Expand:
1982 assert(0 && "Invalid ABI kind for return argument");
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001983 }
1984 }
Daniel Dunbare126ab12008-09-10 02:41:04 +00001985
1986 if (RV) {
1987 Builder.CreateRet(RV);
1988 } else {
1989 Builder.CreateRetVoid();
1990 }
Daniel Dunbarfc1a9c42008-09-09 23:27:19 +00001991}
1992
Anders Carlssond927fa72009-04-08 20:47:54 +00001993RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
1994 return EmitAnyExprToTemp(E);
1995}
1996
/// EmitCall - Generate a call to \arg Callee with the arguments in \arg
/// CallArgs, lowering each argument and the return value according to the
/// ABI classification in \arg CallInfo. \arg TargetDecl, when given, is
/// used when constructing the call's attribute list. Returns the call
/// result as an RValue of the source-level return type.
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and
  // simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    // Create a temporary alloca to hold the result of the call. :(
    // Note: this sret pointer becomes Args[0], which the Indirect
    // return handling below relies on.
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  // Walk the source-level arguments and the ABI classifications in
  // lock-step, lowering each RValue into one or more LLVM arguments.
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      // Pass by pointer: scalars and complex values are first spilled
      // to a temporary; aggregates are already in memory.
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        // Build the complex value as a two-element first-class LLVM
        // aggregate (real at index 0, imaginary at index 1).
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        // Aggregate passed directly: load the whole value from memory.
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      // Argument contributes nothing at the LLVM level.
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      // Spill the value to memory (if not already there), then reload
      // it as the ABI-mandated coercion type.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      // Flatten the aggregate into a sequence of scalar arguments.
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  // Emit a plain call when there is no landing pad to unwind to, or the
  // callee is known not to unwind; otherwise emit an invoke.
  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, &Args[0], &Args[0]+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              &Args[0], &Args[0]+Args.size());
    // Continue emitting on the normal-return edge.
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  // For direct calls to a known function, match its calling convention.
  if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr
    // emitters in generally are not ready to handle emitting
    // expressions at unreachable points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  // Only name the result when names are being preserved (and it has one).
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  // Translate the LLVM return value back into an RValue of type RetTy.
  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    // The callee wrote its result through the sret pointer (Args[0]).
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      // Unpack the two-element aggregate produced for complex returns.
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Aggregates are handled in memory; store the returned value out.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    // Store the ABI-typed result to memory, then reload it as RetTy.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    // Expand applies only to arguments, never to return values.
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}
Daniel Dunbar7fbcf9c2009-02-10 20:44:09 +00002158
2159/* VarArg handling */
2160
2161llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2162 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
2163}