//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

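/// isAggregateTypeForABI - Return true if this type is treated as an
/// aggregate for ABI purposes: anything Clang does not evaluate as a scalar,
/// plus member function pointers, which are handled the same way.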
static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

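/// isRecordReturnIndirect - Return true if the C++ ABI requires a value of
/// this record type to be returned indirectly (typically because copying or
/// destroying it is non-trivial).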
static bool isRecordReturnIndirect(const RecordType *RT,
                                   CodeGen::CodeGenTypes &CGT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;
  return CGT.CGM.getCXXABI().isReturnTypeIndirect(RD);
}

static bool isRecordReturnIndirect(QualType T, CodeGen::CodeGenTypes &CGT) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  return isRecordReturnIndirect(RT, CGT);
}

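/// getRecordArgABI - Query the C++ ABI for how an argument of this record
/// type must be passed: by the default rules, directly but forced into
/// memory, or indirectly.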
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CodeGen::CodeGenTypes &CGT) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CGT.CGM.getCXXABI().getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CodeGen::CodeGenTypes &CGT) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CGT);
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

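// Example of the output this produces (illustrative only; the exact text
// depends on the kind being dumped and on the target):
//   (ABIArgInfo Kind=Indirect Align=4 ByVal=1 Realign=0)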
void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

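/// getDependentLibraryOption - Compute the linker option used to pull in a
/// dependent library. With this default implementation a library named "rt"
/// becomes the option "-lrt"; targets whose linkers use a different syntax
/// are expected to override this.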
void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it is an
/// unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return 0;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isEmptyRecord(Context, i->getType(), true))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i)
    if (!isEmptyField(Context, *i, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it)
      it->info = classifyArgumentType(it->type);
  }

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordReturnIndirect(Ty, CGT))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}

  /// For PNaCl we don't want llvm.pow.* intrinsics to be emitted instead
  /// of library function calls.
  bool emitIntrinsicForPow() const { return false; }
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

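/// X86AdjustInlineAsmType - For the MMX register constraints ("y"/"&y"),
/// rewrite 64-bit vector operand types to the LLVM x86_mmx type and reject
/// other vector widths; all other constraints and types are passed through
/// unchanged.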
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return 0;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                         unsigned callingConvention);

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal,
                               unsigned &FreeRegs) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy,
                                unsigned callingConvention) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &FreeRegs,
                                  bool IsFastCall) const;
  bool shouldUseInReg(QualType Ty, unsigned &FreeRegs,
                      bool IsFastCall, bool &NeedsPadding) const;

public:

  virtual void computeInfo(CGFunctionInfo &FI) const;
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context,
                                               unsigned callingConvention) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer, it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      callingConvention);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (callingConvention == llvm::CallingConv::X86_ThisCall &&
      RT->isStructureType()) {
    return false;
  }

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context,
                                    callingConvention))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                          unsigned callingConvention) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return ABIArgInfo::getIndirect(0);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      if (isRecordReturnIndirect(RT, CGT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Small structures which are register sized are generally returned
    // in a register.
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
                                                  callingConvention)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return 0;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      if (!isRecordWithSSEVectorType(Context, i->getType()))
        return false;

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            unsigned &FreeRegs) const {
  if (!ByVal) {
    if (FreeRegs) {
      --FreeRegs; // Non byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  if (StackAlign < TypeAlign)
    return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
                                   /*Realign=*/true);

  return ABIArgInfo::getIndirect(StackAlign);
}

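/// classify - Classify a value of the given type as Integer or Float for the
/// x86-32 register-parameter (inreg) rules; a single-element struct is
/// classified by its one element.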
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

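/// shouldUseInReg - Decide whether a value of the given type should be passed
/// in the remaining integer registers, updating FreeRegs accordingly. For
/// fastcall, NeedsPadding reports whether padding registers are needed when
/// the value itself ends up on the stack.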
bool X86_32ABIInfo::shouldUseInReg(QualType Ty, unsigned &FreeRegs,
                                   bool IsFastCall, bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > FreeRegs) {
    FreeRegs = 0;
    return false;
  }

  FreeRegs -= SizeInRegs;

  if (IsFastCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &FreeRegs,
                                               bool IsFastCall) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, FreeRegs);

      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
        return getIndirectResult(Ty, RAA == CGCXXABI::RAA_DirectInMemory,
                                 FreeRegs);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, FreeRegs);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(IsFastCall, PaddingType);

    return getIndirectResult(Ty, true, FreeRegs);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, FreeRegs, IsFastCall, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

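// Classify the return value first, since an indirect (sret) return may
// consume one of the integer registers; then hand the remaining registers out
// to the arguments in order.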
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                          FI.getCallingConvention());

  unsigned CC = FI.getCallingConvention();
  bool IsFastCall = CC == llvm::CallingConv::X86_FastCall;
  unsigned FreeRegs;
  if (IsFastCall)
    FreeRegs = 2;
  else if (FI.getHasRegParm())
    FreeRegs = FI.getRegParm();
  else
    FreeRegs = DefaultNumRegisterParameters;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect() && FreeRegs) {
    --FreeRegs;
    ABIArgInfo &Old = FI.getReturnInfo();
    Old = ABIArgInfo::getIndirectInReg(Old.getIndirectAlign(),
                                       Old.getIndirectByVal(),
                                       Old.getIndirectRealign());
  }

  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it)
    it->info = classifyArgumentType(it->type, FreeRegs, IsFastCall);
}

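// va_arg lowering for x86-32: the va_list is a simple pointer into the
// argument area, so load the current pointer, realign it if the type needs
// more than 4-byte alignment, and advance it past the argument (rounded up to
// that alignment).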
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                                llvm::AttributeSet::FunctionIndex,
                                                B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

Chris Lattnerdce5ad02010-06-28 20:05:43 +00001077//===----------------------------------------------------------------------===//
1078// X86-64 ABI Implementation
1079//===----------------------------------------------------------------------===//
1080
1081
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001082namespace {
1083/// X86_64ABIInfo - The X86_64 ABI information.
1084class X86_64ABIInfo : public ABIInfo {
1085 enum Class {
1086 Integer = 0,
1087 SSE,
1088 SSEUp,
1089 X87,
1090 X87Up,
1091 ComplexX87,
1092 NoClass,
1093 Memory
1094 };
1095
1096 /// merge - Implement the X86_64 ABI merging algorithm.
1097 ///
1098 /// Merge an accumulating classification \arg Accum with a field
1099 /// classification \arg Field.
1100 ///
1101 /// \param Accum - The accumulating classification. This should
1102 /// always be either NoClass or the result of a previous merge
1103 /// call. In addition, this should never be Memory (the caller
1104 /// should just return Memory for the aggregate).
Chris Lattner1090a9b2010-06-28 21:43:59 +00001105 static Class merge(Class Accum, Class Field);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001106
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001107 /// postMerge - Implement the X86_64 ABI post merging algorithm.
1108 ///
1109 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
1110 /// final MEMORY or SSE classes when necessary.
1111 ///
1112 /// \param AggregateSize - The size of the current aggregate in
1113 /// the classification process.
1114 ///
1115 /// \param Lo - The classification for the parts of the type
1116 /// residing in the low word of the containing object.
1117 ///
1118 /// \param Hi - The classification for the parts of the type
1119 /// residing in the higher words of the containing object.
1120 ///
1121 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
1122
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001123 /// classify - Determine the x86_64 register classes in which the
1124 /// given type T should be passed.
1125 ///
1126 /// \param Lo - The classification for the parts of the type
1127 /// residing in the low word of the containing object.
1128 ///
1129 /// \param Hi - The classification for the parts of the type
1130 /// residing in the high word of the containing object.
1131 ///
1132 /// \param OffsetBase - The bit offset of this type in the
1133 /// containing object. Some parameters are classified differently
1134 /// depending on whether they straddle an eightbyte boundary.
1135 ///
Eli Friedman7a1b5862013-06-12 00:13:45 +00001136 /// \param isNamedArg - Whether the argument in question is a "named"
1137 /// argument, as used in AMD64-ABI 3.5.7.
1138 ///
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001139 /// If a word is unused its result will be NoClass; if a type should
1140 /// be passed in Memory then at least the classification of \arg Lo
1141 /// will be Memory.
1142 ///
Sylvestre Ledruf3477c12012-09-27 10:16:10 +00001143 /// The \arg Lo class will be NoClass iff the argument is ignored.
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001144 ///
1145 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
1146 /// also be ComplexX87.
Eli Friedman7a1b5862013-06-12 00:13:45 +00001147 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1148 bool isNamedArg) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001149
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001150 llvm::Type *GetByteVectorType(QualType Ty) const;
Chris Lattner9cbe4f02011-07-09 17:41:47 +00001151 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1152 unsigned IROffset, QualType SourceTy,
1153 unsigned SourceOffset) const;
1154 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1155 unsigned IROffset, QualType SourceTy,
1156 unsigned SourceOffset) const;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001157
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001158 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
Daniel Dunbar46c54fb2010-04-21 19:49:55 +00001159 /// result such that the value will be returned in memory.
Chris Lattner9c254f02010-06-29 06:01:59 +00001160 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
Daniel Dunbar46c54fb2010-04-21 19:49:55 +00001161
1162 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001163 /// such that the argument will be passed in memory.
Daniel Dunbaredfac032012-03-10 01:03:58 +00001164 ///
1165 /// \param freeIntRegs - The number of free integer registers remaining
1166 /// available.
1167 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001168
Chris Lattnera3c109b2010-07-29 02:16:43 +00001169 ABIArgInfo classifyReturnType(QualType RetTy) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001170
Bill Wendlingbb465d72010-10-18 03:41:31 +00001171 ABIArgInfo classifyArgumentType(QualType Ty,
Daniel Dunbaredfac032012-03-10 01:03:58 +00001172 unsigned freeIntRegs,
Bill Wendlingbb465d72010-10-18 03:41:31 +00001173 unsigned &neededInt,
Eli Friedman7a1b5862013-06-12 00:13:45 +00001174 unsigned &neededSSE,
1175 bool isNamedArg) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001176
Eli Friedmanee1ad992011-12-02 00:11:43 +00001177 bool IsIllegalVectorType(QualType Ty) const;
1178
John McCall67a57732011-04-21 01:20:55 +00001179 /// The 0.98 ABI revision clarified a lot of ambiguities,
1180 /// unfortunately in ways that were not always consistent with
1181 /// certain previous compilers. In particular, platforms which
1182 /// required strict binary compatibility with older versions of GCC
1183 /// may need to exempt themselves.
1184 bool honorsRevision0_98() const {
John McCall64aa4b32013-04-16 22:48:15 +00001185 return !getTarget().getTriple().isOSDarwin();
John McCall67a57732011-04-21 01:20:55 +00001186 }
1187
Eli Friedmanee1ad992011-12-02 00:11:43 +00001188 bool HasAVX;
Derek Schuffbabaf312012-10-11 15:52:22 +00001189 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
1190 // 64-bit hardware.
1191 bool Has64BitPointers;
Eli Friedmanee1ad992011-12-02 00:11:43 +00001192
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001193public:
Eli Friedmanee1ad992011-12-02 00:11:43 +00001194 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
Derek Schuffbabaf312012-10-11 15:52:22 +00001195 ABIInfo(CGT), HasAVX(hasavx),
Derek Schuff90da80c2012-10-11 18:21:13 +00001196 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
Derek Schuffbabaf312012-10-11 15:52:22 +00001197 }
Chris Lattner9c254f02010-06-29 06:01:59 +00001198
John McCallde5d3c72012-02-17 03:33:10 +00001199 bool isPassedUsingAVXType(QualType type) const {
1200 unsigned neededInt, neededSSE;
Daniel Dunbaredfac032012-03-10 01:03:58 +00001201 // The freeIntRegs argument doesn't matter here.
Eli Friedman7a1b5862013-06-12 00:13:45 +00001202 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1203 /*isNamedArg*/true);
John McCallde5d3c72012-02-17 03:33:10 +00001204 if (info.isDirect()) {
1205 llvm::Type *ty = info.getCoerceToType();
1206 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1207 return (vectorTy->getBitWidth() > 128);
1208 }
1209 return false;
1210 }
1211
Chris Lattneree5dcd02010-07-29 02:31:05 +00001212 virtual void computeInfo(CGFunctionInfo &FI) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001213
1214 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1215 CodeGenFunction &CGF) const;
1216};
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00001217
Chris Lattnerf13721d2010-08-31 16:44:54 +00001218/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
NAKAMURA Takumia7573222011-01-17 22:56:31 +00001219class WinX86_64ABIInfo : public ABIInfo {
1220
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00001221 ABIArgInfo classify(QualType Ty, bool IsReturnType) const;
NAKAMURA Takumia7573222011-01-17 22:56:31 +00001222
Chris Lattnerf13721d2010-08-31 16:44:54 +00001223public:
NAKAMURA Takumia7573222011-01-17 22:56:31 +00001224 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
1225
1226 virtual void computeInfo(CGFunctionInfo &FI) const;
Chris Lattnerf13721d2010-08-31 16:44:54 +00001227
1228 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1229 CodeGenFunction &CGF) const;
1230};
1231
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00001232class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1233public:
Eli Friedmanee1ad992011-12-02 00:11:43 +00001234 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
Derek Schuffbabaf312012-10-11 15:52:22 +00001235 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
John McCall6374c332010-03-06 00:35:14 +00001236
John McCallde5d3c72012-02-17 03:33:10 +00001237 const X86_64ABIInfo &getABIInfo() const {
1238 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
1239 }
1240
John McCall6374c332010-03-06 00:35:14 +00001241 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
1242 return 7;
1243 }
1244
1245 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1246 llvm::Value *Address) const {
Chris Lattner8b418682012-02-07 00:39:47 +00001247 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001248
John McCallaeeb7012010-05-27 06:19:26 +00001249 // 0-15 are the 16 integer registers.
1250 // 16 is %rip.
Chris Lattner8b418682012-02-07 00:39:47 +00001251 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
John McCall6374c332010-03-06 00:35:14 +00001252 return false;
1253 }
Peter Collingbourne4b93d662011-02-19 23:03:58 +00001254
Jay Foadef6de3d2011-07-11 09:56:20 +00001255 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
Chris Lattner5f9e2722011-07-23 10:55:15 +00001256 StringRef Constraint,
Jay Foadef6de3d2011-07-11 09:56:20 +00001257 llvm::Type* Ty) const {
Peter Collingbourne4b93d662011-02-19 23:03:58 +00001258 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1259 }
1260
John McCallde5d3c72012-02-17 03:33:10 +00001261 bool isNoProtoCallVariadic(const CallArgList &args,
1262 const FunctionNoProtoType *fnType) const {
John McCall01f151e2011-09-21 08:08:30 +00001263 // The default CC on x86-64 sets %al to the number of SSE
1264 // registers used, and GCC sets this when calling an unprototyped
Eli Friedman3ed79032011-12-01 04:53:19 +00001265 // function, so we override the default behavior. However, don't do
Eli Friedman68805fe2011-12-06 03:08:26 +00001266 // that when AVX types are involved: the ABI explicitly states it is
1267 // undefined, and it doesn't work in practice because of how the ABI
1268 // defines varargs anyway.
John McCallde5d3c72012-02-17 03:33:10 +00001269 if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
Eli Friedman3ed79032011-12-01 04:53:19 +00001270 bool HasAVXType = false;
John McCallde5d3c72012-02-17 03:33:10 +00001271 for (CallArgList::const_iterator
1272 it = args.begin(), ie = args.end(); it != ie; ++it) {
1273 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
1274 HasAVXType = true;
1275 break;
Eli Friedman3ed79032011-12-01 04:53:19 +00001276 }
1277 }
John McCallde5d3c72012-02-17 03:33:10 +00001278
Eli Friedman3ed79032011-12-01 04:53:19 +00001279 if (!HasAVXType)
1280 return true;
1281 }
John McCall01f151e2011-09-21 08:08:30 +00001282
John McCallde5d3c72012-02-17 03:33:10 +00001283 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
John McCall01f151e2011-09-21 08:08:30 +00001284 }
1285
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00001286};
1287
Aaron Ballman89735b92013-05-24 15:06:56 +00001288static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1289 // If the argument does not end in .lib, automatically add the suffix. This
1290 // matches the behavior of MSVC.
1291 std::string ArgStr = Lib;
1292 if (Lib.size() <= 4 ||
1293 Lib.substr(Lib.size() - 4).compare_lower(".lib") != 0) {
1294 ArgStr += ".lib";
1295 }
1296 return ArgStr;
1297}
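// e.g. qualifyWindowsLibrary("msvcrt") yields "msvcrt.lib", while "msvcrt.lib"
// (or "msvcrt.LIB", since the comparison is case-insensitive) comes back
// unchanged.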
1298
Reid Kleckner3190ca92013-05-08 13:44:39 +00001299class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
1300public:
John McCallb8b52972013-06-18 02:46:29 +00001301 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1302 bool d, bool p, bool w, unsigned RegParms)
1303 : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
Reid Kleckner3190ca92013-05-08 13:44:39 +00001304
1305 void getDependentLibraryOption(llvm::StringRef Lib,
1306 llvm::SmallString<24> &Opt) const {
1307 Opt = "/DEFAULTLIB:";
Aaron Ballman89735b92013-05-24 15:06:56 +00001308 Opt += qualifyWindowsLibrary(Lib);
Reid Kleckner3190ca92013-05-08 13:44:39 +00001309 }
Aaron Ballmana7ff62f2013-06-04 02:07:14 +00001310
1311 void getDetectMismatchOption(llvm::StringRef Name,
1312 llvm::StringRef Value,
1313 llvm::SmallString<32> &Opt) const {
Eli Friedman572ac322013-06-07 22:42:22 +00001314 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
Aaron Ballmana7ff62f2013-06-04 02:07:14 +00001315 }
Reid Kleckner3190ca92013-05-08 13:44:39 +00001316};
1317
Chris Lattnerf13721d2010-08-31 16:44:54 +00001318class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1319public:
1320 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
1321 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
1322
1323 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
1324 return 7;
1325 }
1326
1327 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1328 llvm::Value *Address) const {
Chris Lattner8b418682012-02-07 00:39:47 +00001329 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
Michael J. Spencer9cac4942010-10-19 06:39:39 +00001330
Chris Lattnerf13721d2010-08-31 16:44:54 +00001331 // 0-15 are the 16 integer registers.
1332 // 16 is %rip.
Chris Lattner8b418682012-02-07 00:39:47 +00001333 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
Chris Lattnerf13721d2010-08-31 16:44:54 +00001334 return false;
1335 }
Reid Kleckner3190ca92013-05-08 13:44:39 +00001336
1337 void getDependentLibraryOption(llvm::StringRef Lib,
1338 llvm::SmallString<24> &Opt) const {
1339 Opt = "/DEFAULTLIB:";
Aaron Ballman89735b92013-05-24 15:06:56 +00001340 Opt += qualifyWindowsLibrary(Lib);
Reid Kleckner3190ca92013-05-08 13:44:39 +00001341 }
Aaron Ballmana7ff62f2013-06-04 02:07:14 +00001342
1343 void getDetectMismatchOption(llvm::StringRef Name,
1344 llvm::StringRef Value,
1345 llvm::SmallString<32> &Opt) const {
Eli Friedman572ac322013-06-07 22:42:22 +00001346 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
Aaron Ballmana7ff62f2013-06-04 02:07:14 +00001347 }
Chris Lattnerf13721d2010-08-31 16:44:54 +00001348};
1349
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001350}
1351
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001352void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
1353 Class &Hi) const {
1354 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1355 //
1356 // (a) If one of the classes is Memory, the whole argument is passed in
1357 // memory.
1358 //
1359 // (b) If X87UP is not preceded by X87, the whole argument is passed in
1360 // memory.
1361 //
1362 // (c) If the size of the aggregate exceeds two eightbytes and the first
1363 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
1364 // argument is passed in memory. NOTE: This is necessary to keep the
1365 // ABI working for processors that don't support the __m256 type.
1366 //
1367 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1368 //
1369 // Some of these are enforced by the merging logic. Others can arise
1370 // only with unions; for example:
1371 // union { _Complex double; unsigned; }
1372 //
1373 // Note that clauses (b) and (c) were added in 0.98.
1374 //
1375 if (Hi == Memory)
1376 Lo = Memory;
1377 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1378 Lo = Memory;
1379 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1380 Lo = Memory;
1381 if (Hi == SSEUp && Lo != SSE)
1382 Hi = SSE;
1383}
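// For example, a named __m256 argument classifies as (SSE, SSEUp) and is left
// untouched by the clauses above, while a 32-byte aggregate whose pair comes
// out as (SSE, SSE) trips clause (c) and is forced into MEMORY; in this
// implementation effectively only aggregates wrapping a single 256-bit vector
// element keep the (SSE, SSEUp) pair.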
1384
Chris Lattner1090a9b2010-06-28 21:43:59 +00001385X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001386 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1387 // classified recursively so that always two fields are
1388 // considered. The resulting class is calculated according to
1389 // the classes of the fields in the eightbyte:
1390 //
1391 // (a) If both classes are equal, this is the resulting class.
1392 //
1393 // (b) If one of the classes is NO_CLASS, the resulting class is
1394 // the other class.
1395 //
1396 // (c) If one of the classes is MEMORY, the result is the MEMORY
1397 // class.
1398 //
1399 // (d) If one of the classes is INTEGER, the result is the
1400 // INTEGER.
1401 //
1402 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1403 // MEMORY is used as class.
1404 //
1405 // (f) Otherwise class SSE is used.
1406
1407 // Accum should never be memory (we should have returned) or
1408 // ComplexX87 (because this cannot be passed in a structure).
1409 assert((Accum != Memory && Accum != ComplexX87) &&
1410 "Invalid accumulated classification during merge.");
1411 if (Accum == Field || Field == NoClass)
1412 return Accum;
Chris Lattner1090a9b2010-06-28 21:43:59 +00001413 if (Field == Memory)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001414 return Memory;
Chris Lattner1090a9b2010-06-28 21:43:59 +00001415 if (Accum == NoClass)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001416 return Field;
Chris Lattner1090a9b2010-06-28 21:43:59 +00001417 if (Accum == Integer || Field == Integer)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001418 return Integer;
Chris Lattner1090a9b2010-06-28 21:43:59 +00001419 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1420 Accum == X87 || Accum == X87Up)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001421 return Memory;
Chris Lattner1090a9b2010-06-28 21:43:59 +00001422 return SSE;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001423}
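// A few concrete cases of the rules above: merge(NoClass, Integer) == Integer
// (rule b), merge(Integer, SSE) == Integer (rule d), merge(SSE, SSE) == SSE
// (rule a), and merge(SSE, X87) == Memory (rule e).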
1424
Chris Lattnerbcaedae2010-06-30 19:14:05 +00001425void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Eli Friedman7a1b5862013-06-12 00:13:45 +00001426 Class &Lo, Class &Hi, bool isNamedArg) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001427 // FIXME: This code can be simplified by introducing a simple value class for
1428 // Class pairs with appropriate constructor methods for the various
1429 // situations.
1430
1431 // FIXME: Some of the split computations are wrong; unaligned vectors
1432 // shouldn't be passed in registers for example, so there is no chance they
1433 // can straddle an eightbyte. Verify & simplify.
1434
1435 Lo = Hi = NoClass;
1436
1437 Class &Current = OffsetBase < 64 ? Lo : Hi;
1438 Current = Memory;
1439
John McCall183700f2009-09-21 23:43:11 +00001440 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001441 BuiltinType::Kind k = BT->getKind();
1442
1443 if (k == BuiltinType::Void) {
1444 Current = NoClass;
1445 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1446 Lo = Integer;
1447 Hi = Integer;
1448 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1449 Current = Integer;
Derek Schuff7da46f92012-10-11 16:55:58 +00001450 } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
1451 (k == BuiltinType::LongDouble &&
John McCall64aa4b32013-04-16 22:48:15 +00001452 getTarget().getTriple().getOS() == llvm::Triple::NaCl)) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001453 Current = SSE;
1454 } else if (k == BuiltinType::LongDouble) {
1455 Lo = X87;
1456 Hi = X87Up;
1457 }
1458 // FIXME: _Decimal32 and _Decimal64 are SSE.
1459 // FIXME: __float128 and _Decimal128 are (SSE, SSEUp).
Chris Lattner1090a9b2010-06-28 21:43:59 +00001460 return;
1461 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001462
Chris Lattner1090a9b2010-06-28 21:43:59 +00001463 if (const EnumType *ET = Ty->getAs<EnumType>()) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001464 // Classify the underlying integer type.
Eli Friedman7a1b5862013-06-12 00:13:45 +00001465 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
Chris Lattner1090a9b2010-06-28 21:43:59 +00001466 return;
1467 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001468
Chris Lattner1090a9b2010-06-28 21:43:59 +00001469 if (Ty->hasPointerRepresentation()) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001470 Current = Integer;
Chris Lattner1090a9b2010-06-28 21:43:59 +00001471 return;
1472 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001473
Chris Lattner1090a9b2010-06-28 21:43:59 +00001474 if (Ty->isMemberPointerType()) {
Derek Schuffbabaf312012-10-11 15:52:22 +00001475 if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
Daniel Dunbar67d438d2010-05-15 00:00:37 +00001476 Lo = Hi = Integer;
1477 else
1478 Current = Integer;
Chris Lattner1090a9b2010-06-28 21:43:59 +00001479 return;
1480 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001481
Chris Lattner1090a9b2010-06-28 21:43:59 +00001482 if (const VectorType *VT = Ty->getAs<VectorType>()) {
Chris Lattnerea044322010-07-29 02:01:43 +00001483 uint64_t Size = getContext().getTypeSize(VT);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001484 if (Size == 32) {
1485 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1486 // float> as integer.
1487 Current = Integer;
1488
1489 // If this type crosses an eightbyte boundary, it should be
1490 // split.
1491 uint64_t EB_Real = (OffsetBase) / 64;
1492 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1493 if (EB_Real != EB_Imag)
1494 Hi = Lo;
1495 } else if (Size == 64) {
1496 // gcc passes <1 x double> in memory. :(
1497 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1498 return;
1499
1500 // gcc passes <1 x long long> as INTEGER.
Chris Lattner473f8e72010-08-26 18:03:20 +00001501 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
Chris Lattner0fefa412010-08-26 18:13:50 +00001502 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1503 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1504 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001505 Current = Integer;
1506 else
1507 Current = SSE;
1508
1509 // If this type crosses an eightbyte boundary, it should be
1510 // split.
1511 if (OffsetBase && OffsetBase != 64)
1512 Hi = Lo;
Eli Friedman7a1b5862013-06-12 00:13:45 +00001513 } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001514 // Arguments of 256-bits are split into four eightbyte chunks. The
1515 // least significant one belongs to class SSE and all the others to class
1516 // SSEUP. The original Lo and Hi design considers that types can't be
1517 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
1518 // This design isn't correct for 256 bits, but since there are no cases
1519 // where the upper parts would need to be inspected, avoid adding
1520 // complexity and just consider Hi to match the 64-256 part.
Eli Friedman7a1b5862013-06-12 00:13:45 +00001521 //
1522 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
1523 // registers if they are "named", i.e. not part of the "..." of a
1524 // variadic function.
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001525 Lo = SSE;
1526 Hi = SSEUp;
1527 }
Chris Lattner1090a9b2010-06-28 21:43:59 +00001528 return;
1529 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001530
Chris Lattner1090a9b2010-06-28 21:43:59 +00001531 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
Chris Lattnerea044322010-07-29 02:01:43 +00001532 QualType ET = getContext().getCanonicalType(CT->getElementType());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001533
Chris Lattnerea044322010-07-29 02:01:43 +00001534 uint64_t Size = getContext().getTypeSize(Ty);
Douglas Gregor2ade35e2010-06-16 00:17:44 +00001535 if (ET->isIntegralOrEnumerationType()) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001536 if (Size <= 64)
1537 Current = Integer;
1538 else if (Size <= 128)
1539 Lo = Hi = Integer;
Chris Lattnerea044322010-07-29 02:01:43 +00001540 } else if (ET == getContext().FloatTy)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001541 Current = SSE;
Derek Schuff7da46f92012-10-11 16:55:58 +00001542 else if (ET == getContext().DoubleTy ||
1543 (ET == getContext().LongDoubleTy &&
John McCall64aa4b32013-04-16 22:48:15 +00001544 getTarget().getTriple().getOS() == llvm::Triple::NaCl))
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001545 Lo = Hi = SSE;
Chris Lattnerea044322010-07-29 02:01:43 +00001546 else if (ET == getContext().LongDoubleTy)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001547 Current = ComplexX87;
1548
1549 // If this complex type crosses an eightbyte boundary then it
1550 // should be split.
1551 uint64_t EB_Real = (OffsetBase) / 64;
Chris Lattnerea044322010-07-29 02:01:43 +00001552 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001553 if (Hi == NoClass && EB_Real != EB_Imag)
1554 Hi = Lo;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001555
Chris Lattner1090a9b2010-06-28 21:43:59 +00001556 return;
1557 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001558
Chris Lattnerea044322010-07-29 02:01:43 +00001559 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001560 // Arrays are treated like structures.
1561
Chris Lattnerea044322010-07-29 02:01:43 +00001562 uint64_t Size = getContext().getTypeSize(Ty);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001563
1564 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001565 // than four eightbytes, ..., it has class MEMORY.
1566 if (Size > 256)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001567 return;
1568
1569 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1570 // fields, it has class MEMORY.
1571 //
1572 // Only need to check alignment of array base.
Chris Lattnerea044322010-07-29 02:01:43 +00001573 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001574 return;
1575
1576 // Otherwise implement simplified merge. We could be smarter about
1577 // this, but it isn't worth it and would be harder to verify.
1578 Current = NoClass;
Chris Lattnerea044322010-07-29 02:01:43 +00001579 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001580 uint64_t ArraySize = AT->getSize().getZExtValue();
Bruno Cardoso Lopes089d8922011-07-12 01:27:38 +00001581
1582 // The only case a 256-bit wide vector could be used is when the array
1583 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1584 // to work for sizes wider than 128, early check and fallback to memory.
1585 if (Size > 128 && EltSize != 256)
1586 return;
1587
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001588 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1589 Class FieldLo, FieldHi;
Eli Friedman7a1b5862013-06-12 00:13:45 +00001590 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001591 Lo = merge(Lo, FieldLo);
1592 Hi = merge(Hi, FieldHi);
1593 if (Lo == Memory || Hi == Memory)
1594 break;
1595 }
1596
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001597 postMerge(Size, Lo, Hi);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001598 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
Chris Lattner1090a9b2010-06-28 21:43:59 +00001599 return;
1600 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001601
Chris Lattner1090a9b2010-06-28 21:43:59 +00001602 if (const RecordType *RT = Ty->getAs<RecordType>()) {
Chris Lattnerea044322010-07-29 02:01:43 +00001603 uint64_t Size = getContext().getTypeSize(Ty);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001604
1605 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001606 // than four eightbytes, ..., it has class MEMORY.
1607 if (Size > 256)
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001608 return;
1609
Anders Carlsson0a8f8472009-09-16 15:53:40 +00001610 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1611 // copy constructor or a non-trivial destructor, it is passed by invisible
1612 // reference.
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00001613 if (getRecordArgABI(RT, CGT))
Anders Carlsson0a8f8472009-09-16 15:53:40 +00001614 return;
Daniel Dunbarce9f4232009-11-22 23:01:23 +00001615
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001616 const RecordDecl *RD = RT->getDecl();
1617
1618 // Assume variable sized types are passed in memory.
1619 if (RD->hasFlexibleArrayMember())
1620 return;
1621
Chris Lattnerea044322010-07-29 02:01:43 +00001622 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001623
1624 // Reset Lo class, this will be recomputed.
1625 Current = NoClass;
Daniel Dunbarce9f4232009-11-22 23:01:23 +00001626
1627 // If this is a C++ record, classify the bases first.
1628 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1629 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1630 e = CXXRD->bases_end(); i != e; ++i) {
1631 assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1632 "Unexpected base class!");
1633 const CXXRecordDecl *Base =
1634 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
1635
1636 // Classify this field.
1637 //
1638 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1639 // single eightbyte, each is classified separately. Each eightbyte gets
1640 // initialized to class NO_CLASS.
1641 Class FieldLo, FieldHi;
Benjamin Kramerd4f51982012-07-04 18:45:14 +00001642 uint64_t Offset =
1643 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
Eli Friedman7a1b5862013-06-12 00:13:45 +00001644 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
Daniel Dunbarce9f4232009-11-22 23:01:23 +00001645 Lo = merge(Lo, FieldLo);
1646 Hi = merge(Hi, FieldHi);
1647 if (Lo == Memory || Hi == Memory)
1648 break;
1649 }
1650 }
1651
1652 // Classify the fields one at a time, merging the results.
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001653 unsigned idx = 0;
Bruno Cardoso Lopes548e4782011-07-12 22:30:58 +00001654 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
Argyrios Kyrtzidis17945a02009-06-30 02:36:12 +00001655 i != e; ++i, ++idx) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001656 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1657 bool BitField = i->isBitField();
1658
Bruno Cardoso Lopesb8981df2011-07-13 21:58:55 +00001659 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1660 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001661 //
Bruno Cardoso Lopesb8981df2011-07-13 21:58:55 +00001662 // The only case a 256-bit wide vector could be used is when the struct
1663 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1664 // to work for sizes wider than 128, early check and fallback to memory.
1665 //
1666 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
1667 Lo = Memory;
1668 return;
1669 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001670 // Note, skip this test for bit-fields, see below.
Chris Lattnerea044322010-07-29 02:01:43 +00001671 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001672 Lo = Memory;
1673 return;
1674 }
1675
1676 // Classify this field.
1677 //
1678 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1679 // exceeds a single eightbyte, each is classified
1680 // separately. Each eightbyte gets initialized to class
1681 // NO_CLASS.
1682 Class FieldLo, FieldHi;
1683
1684 // Bit-fields require special handling, they do not force the
1685 // structure to be passed in memory even if unaligned, and
1686 // therefore they can straddle an eightbyte.
1687 if (BitField) {
1688 // Ignore padding bit-fields.
1689 if (i->isUnnamedBitfield())
1690 continue;
1691
1692 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
Richard Smitha6b8b2c2011-10-10 18:28:20 +00001693 uint64_t Size = i->getBitWidthValue(getContext());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001694
1695 uint64_t EB_Lo = Offset / 64;
1696 uint64_t EB_Hi = (Offset + Size - 1) / 64;
1697 FieldLo = FieldHi = NoClass;
1698 if (EB_Lo) {
1699 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1700 FieldLo = NoClass;
1701 FieldHi = Integer;
1702 } else {
1703 FieldLo = Integer;
1704 FieldHi = EB_Hi ? Integer : NoClass;
1705 }
1706 } else
Eli Friedman7a1b5862013-06-12 00:13:45 +00001707 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001708 Lo = merge(Lo, FieldLo);
1709 Hi = merge(Hi, FieldHi);
1710 if (Lo == Memory || Hi == Memory)
1711 break;
1712 }
1713
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001714 postMerge(Size, Lo, Hi);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001715 }
1716}
1717
Chris Lattner9c254f02010-06-29 06:01:59 +00001718ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
Daniel Dunbar46c54fb2010-04-21 19:49:55 +00001719 // If this is a scalar LLVM value then assume LLVM will pass it in the right
1720 // place naturally.
John McCalld608cdb2010-08-22 10:59:02 +00001721 if (!isAggregateTypeForABI(Ty)) {
Daniel Dunbar46c54fb2010-04-21 19:49:55 +00001722 // Treat an enum type as its underlying type.
1723 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1724 Ty = EnumTy->getDecl()->getIntegerType();
1725
1726 return (Ty->isPromotableIntegerType() ?
1727 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1728 }
1729
1730 return ABIArgInfo::getIndirect(0);
1731}
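// In other words, scalars that were sent here (including enums, which are
// lowered to their underlying integer type) are still returned directly or
// extended, on the assumption that LLVM places them correctly; only true
// aggregates are returned through a hidden pointer with default alignment
// (getIndirect(0)).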
1732
Eli Friedmanee1ad992011-12-02 00:11:43 +00001733bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
1734 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
1735 uint64_t Size = getContext().getTypeSize(VecTy);
1736 unsigned LargestVector = HasAVX ? 256 : 128;
1737 if (Size <= 64 || Size > LargestVector)
1738 return true;
1739 }
1740
1741 return false;
1742}
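// This reports vectors of 64 bits or less, and vectors wider than the largest
// supported register (128 bits, or 256 with AVX), as illegal; getIndirectResult
// below uses it so that such vectors are not handed back as plain direct
// scalars.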
1743
Daniel Dunbaredfac032012-03-10 01:03:58 +00001744ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
1745 unsigned freeIntRegs) const {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001746 // If this is a scalar LLVM value then assume LLVM will pass it in the right
1747 // place naturally.
Daniel Dunbaredfac032012-03-10 01:03:58 +00001748 //
1749 // This assumption is optimistic, as there could be free registers available
1750 // when we need to pass this argument in memory, and LLVM could try to pass
1751 // the argument in the free register. This does not seem to happen currently,
1752 // but this code would be much safer if we could mark the argument with
1753 // 'onstack'. See PR12193.
Eli Friedmanee1ad992011-12-02 00:11:43 +00001754 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
Douglas Gregoraa74a1e2010-02-02 20:10:50 +00001755 // Treat an enum type as its underlying type.
1756 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1757 Ty = EnumTy->getDecl()->getIntegerType();
1758
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00001759 return (Ty->isPromotableIntegerType() ?
1760 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Douglas Gregoraa74a1e2010-02-02 20:10:50 +00001761 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001762
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00001763 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
1764 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Anders Carlsson0a8f8472009-09-16 15:53:40 +00001765
Chris Lattner855d2272011-05-22 23:21:23 +00001766 // Compute the byval alignment. We specify the alignment of the byval in all
1767 // cases so that the mid-level optimizer knows the alignment of the byval.
1768 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
Daniel Dunbaredfac032012-03-10 01:03:58 +00001769
1770 // Attempt to avoid passing indirect results using byval when possible. This
1771 // is important for good codegen.
1772 //
1773 // We do this by coercing the value into a scalar type which the backend can
1774 // handle naturally (i.e., without using byval).
1775 //
1776 // For simplicity, we currently only do this when we have exhausted all of the
1777 // free integer registers. Doing this when there are free integer registers
1778 // would require more care, as we would have to ensure that the coerced value
1779 // did not claim the unused register. That would require either reordering the
1780 // arguments to the function (so that any subsequent inreg values came first),
1781 // or only doing this optimization when there were no following arguments that
1782 // might be inreg.
1783 //
1784 // We currently expect it to be rare (particularly in well written code) for
1785 // arguments to be passed on the stack when there are still free integer
1786 // registers available (this would typically imply large structs being passed
1787 // by value), so this seems like a fair tradeoff for now.
1788 //
1789 // We can revisit this if the backend grows support for 'onstack' parameter
1790 // attributes. See PR12193.
1791 if (freeIntRegs == 0) {
1792 uint64_t Size = getContext().getTypeSize(Ty);
1793
1794 // If this type fits in an eightbyte, coerce it into the matching integral
1795 // type, which will end up on the stack (with alignment 8).
1796 if (Align == 8 && Size <= 64)
1797 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1798 Size));
1799 }
1800
Chris Lattner855d2272011-05-22 23:21:23 +00001801 return ABIArgInfo::getIndirect(Align);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00001802}
1803
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001804/// GetByteVectorType - The ABI specifies that a value should be passed in a
1805/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
Chris Lattner0f408f52010-07-29 04:56:46 +00001806/// vector register.
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001807llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
Chris Lattner9cbe4f02011-07-09 17:41:47 +00001808 llvm::Type *IRType = CGT.ConvertType(Ty);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001809
Chris Lattner15842bd2010-07-29 05:02:29 +00001810 // Wrapper structs that just contain vectors are passed just like vectors,
1811 // strip them off if present.
Chris Lattner9cbe4f02011-07-09 17:41:47 +00001812 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
Chris Lattner15842bd2010-07-29 05:02:29 +00001813 while (STy && STy->getNumElements() == 1) {
1814 IRType = STy->getElementType(0);
1815 STy = dyn_cast<llvm::StructType>(IRType);
1816 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001817
Bruno Cardoso Lopes528a8c72011-07-08 22:57:35 +00001818 // If the preferred type is a 16-byte vector, prefer to pass it.
Chris Lattner9cbe4f02011-07-09 17:41:47 +00001819 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1820 llvm::Type *EltTy = VT->getElementType();
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00001821 unsigned BitWidth = VT->getBitWidth();
Tanya Lattnerce275672011-11-28 23:18:11 +00001822 if ((BitWidth >= 128 && BitWidth <= 256) &&
Chris Lattner0f408f52010-07-29 04:56:46 +00001823 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1824 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1825 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1826 EltTy->isIntegerTy(128)))
1827 return VT;
1828 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001829
Chris Lattner0f408f52010-07-29 04:56:46 +00001830 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1831}
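// For example, a single-element wrapper such as struct { __m128 v; } is
// unwrapped to the underlying <4 x float> and passed in one XMM register;
// converted types that don't match one of the accepted 128-256 bit vector
// shapes fall back to <2 x double>.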
1832
Chris Lattnere2962be2010-07-29 07:30:00 +00001833/// BitsContainNoUserData - Return true if the specified [start,end) bit range
1834/// is known to either be off the end of the specified type or being in
1835/// alignment padding. The user type specified is known to be at most 128 bits
1836/// in size, and have passed through X86_64ABIInfo::classify with a successful
1837/// classification that put one of the two halves in the INTEGER class.
1838///
1839/// It is conservatively correct to return false.
1840static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1841 unsigned EndBit, ASTContext &Context) {
1842 // If the bytes being queried are off the end of the type, there is no user
1843 // data hiding here. This handles analysis of builtins, vectors and other
1844 // types that don't contain interesting padding.
1845 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1846 if (TySize <= StartBit)
1847 return true;
1848
Chris Lattner021c3a32010-07-29 07:43:55 +00001849 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1850 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1851 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1852
1853 // Check each element to see if the element overlaps with the queried range.
1854 for (unsigned i = 0; i != NumElts; ++i) {
1855 // If the element is after the span we care about, then we're done.
1856 unsigned EltOffset = i*EltSize;
1857 if (EltOffset >= EndBit) break;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001858
Chris Lattner021c3a32010-07-29 07:43:55 +00001859 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
1860 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1861 EndBit-EltOffset, Context))
1862 return false;
1863 }
1864 // If it overlaps no elements, then it is safe to process as padding.
1865 return true;
1866 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001867
Chris Lattnere2962be2010-07-29 07:30:00 +00001868 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1869 const RecordDecl *RD = RT->getDecl();
1870 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001871
Chris Lattnere2962be2010-07-29 07:30:00 +00001872 // If this is a C++ record, check the bases first.
1873 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1874 for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
1875 e = CXXRD->bases_end(); i != e; ++i) {
1876 assert(!i->isVirtual() && !i->getType()->isDependentType() &&
1877 "Unexpected base class!");
1878 const CXXRecordDecl *Base =
1879 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001880
Chris Lattnere2962be2010-07-29 07:30:00 +00001881 // If the base is after the span we care about, ignore it.
Benjamin Kramerd4f51982012-07-04 18:45:14 +00001882 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
Chris Lattnere2962be2010-07-29 07:30:00 +00001883 if (BaseOffset >= EndBit) continue;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001884
Chris Lattnere2962be2010-07-29 07:30:00 +00001885 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
1886 if (!BitsContainNoUserData(i->getType(), BaseStart,
1887 EndBit-BaseOffset, Context))
1888 return false;
1889 }
1890 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001891
Chris Lattnere2962be2010-07-29 07:30:00 +00001892 // Verify that no field has data that overlaps the region of interest. Yes
1893 // this could be sped up a lot by being smarter about queried fields,
1894 // however we're only looking at structs up to 16 bytes, so we don't care
1895 // much.
1896 unsigned idx = 0;
1897 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1898 i != e; ++i, ++idx) {
1899 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001900
Chris Lattnere2962be2010-07-29 07:30:00 +00001901 // If we found a field after the region we care about, then we're done.
1902 if (FieldOffset >= EndBit) break;
1903
1904 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
1905 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
1906 Context))
1907 return false;
1908 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001909
Chris Lattnere2962be2010-07-29 07:30:00 +00001910 // If nothing in this record overlapped the area of interest, then we're
1911 // clean.
1912 return true;
1913 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001914
Chris Lattnere2962be2010-07-29 07:30:00 +00001915 return false;
1916}
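// Worked example: for
//   struct S { double d; int i; };  // 16 bytes; bits 96-128 are tail padding
// BitsContainNoUserData(S, 96, 128, Ctx) returns true, which is what later
// lets the second eightbyte of S be passed or returned as a plain i32.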
1917
Chris Lattner0b362002010-07-29 18:39:32 +00001918/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
1919/// float member at the specified offset. For example, {int,{float}} has a
1920/// float at offset 4. It is conservatively correct for this routine to return
1921/// false.
Chris Lattner2acc6e32011-07-18 04:24:23 +00001922static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
Micah Villmow25a6a842012-10-08 16:25:52 +00001923 const llvm::DataLayout &TD) {
Chris Lattner0b362002010-07-29 18:39:32 +00001924 // Base case if we find a float.
1925 if (IROffset == 0 && IRType->isFloatTy())
1926 return true;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001927
Chris Lattner0b362002010-07-29 18:39:32 +00001928 // If this is a struct, recurse into the field at the specified offset.
Chris Lattner2acc6e32011-07-18 04:24:23 +00001929 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
Chris Lattner0b362002010-07-29 18:39:32 +00001930 const llvm::StructLayout *SL = TD.getStructLayout(STy);
1931 unsigned Elt = SL->getElementContainingOffset(IROffset);
1932 IROffset -= SL->getElementOffset(Elt);
1933 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
1934 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001935
Chris Lattner0b362002010-07-29 18:39:32 +00001936 // If this is an array, recurse into the field at the specified offset.
Chris Lattner2acc6e32011-07-18 04:24:23 +00001937 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
1938 llvm::Type *EltTy = ATy->getElementType();
Chris Lattner0b362002010-07-29 18:39:32 +00001939 unsigned EltSize = TD.getTypeAllocSize(EltTy);
1940 IROffset -= IROffset/EltSize*EltSize;
1941 return ContainsFloatAtOffset(EltTy, IROffset, TD);
1942 }
1943
1944 return false;
1945}
1946
Chris Lattnerf47c9442010-07-29 18:13:09 +00001947
1948/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
1949/// low 8 bytes of an XMM register, corresponding to the SSE class.
Chris Lattner9cbe4f02011-07-09 17:41:47 +00001950llvm::Type *X86_64ABIInfo::
1951GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
Chris Lattnerf47c9442010-07-29 18:13:09 +00001952 QualType SourceTy, unsigned SourceOffset) const {
Chris Lattnercba8d312010-07-29 18:19:50 +00001953 // The only three choices we have are double, <2 x float>, or float. We
Chris Lattnerf47c9442010-07-29 18:13:09 +00001954 // pass as float if the last 4 bytes is just padding. This happens for
1955 // structs that contain 3 floats.
1956 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
1957 SourceOffset*8+64, getContext()))
1958 return llvm::Type::getFloatTy(getVMContext());
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001959
Chris Lattner0b362002010-07-29 18:39:32 +00001960 // We want to pass as <2 x float> if the LLVM IR type contains a float at
1961 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
1962 // case.
Micah Villmow25a6a842012-10-08 16:25:52 +00001963 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
1964 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
Chris Lattner22fd4ba2010-08-25 23:39:14 +00001965 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00001966
Chris Lattnerf47c9442010-07-29 18:13:09 +00001967 return llvm::Type::getDoubleTy(getVMContext());
1968}
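// For instance, for struct { float x, y, z; } the low eightbyte has floats at
// offsets 0 and 4 and becomes <2 x float>, while the high eightbyte holds a
// single float followed by padding and is passed as float.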
1969
1970
Chris Lattner0d2656d2010-07-29 17:40:35 +00001971/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
1972/// an 8-byte GPR. This means that we either have a scalar or we are talking
1973/// about the high or low part of an up-to-16-byte struct. This routine picks
1974/// the best LLVM IR type to represent this, which may be i64 or may be anything
Chris Lattner49382de2010-07-28 22:44:07 +00001975/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
1976/// etc).
1977///
1978/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
1979/// the source type. IROffset is an offset in bytes into the LLVM IR type that
1980/// the 8-byte value references. PrefType may be null.
1981///
1982/// SourceTy is the source level type for the entire argument. SourceOffset is
1983/// an offset into this that we're processing (which is always either 0 or 8).
1984///
Chris Lattner9cbe4f02011-07-09 17:41:47 +00001985llvm::Type *X86_64ABIInfo::
1986GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
Chris Lattner0d2656d2010-07-29 17:40:35 +00001987 QualType SourceTy, unsigned SourceOffset) const {
Chris Lattnere2962be2010-07-29 07:30:00 +00001988 // If we're dealing with an un-offset LLVM IR type, then it means that we're
1989 // returning an 8-byte unit starting with it. See if we can safely use it.
1990 if (IROffset == 0) {
1991 // Pointers and int64's always fill the 8-byte unit.
Derek Schuffbabaf312012-10-11 15:52:22 +00001992 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
1993 IRType->isIntegerTy(64))
Chris Lattnere2962be2010-07-29 07:30:00 +00001994 return IRType;
Chris Lattner49382de2010-07-28 22:44:07 +00001995
Chris Lattnere2962be2010-07-29 07:30:00 +00001996 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
1997 // goodness in the source type is just tail padding. This is allowed to
1998 // kick in for struct {double,int} on the int, but not on
1999 // struct{double,int,int} because we wouldn't return the second int. We
2000 // have to do this analysis on the source type because we can't depend on
2001 // unions being lowered a specific way etc.
2002 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
Derek Schuffbabaf312012-10-11 15:52:22 +00002003 IRType->isIntegerTy(32) ||
2004 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2005 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2006 cast<llvm::IntegerType>(IRType)->getBitWidth();
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002007
Chris Lattnere2962be2010-07-29 07:30:00 +00002008 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2009 SourceOffset*8+64, getContext()))
2010 return IRType;
2011 }
2012 }
Chris Lattner49382de2010-07-28 22:44:07 +00002013
Chris Lattner2acc6e32011-07-18 04:24:23 +00002014 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
Chris Lattner49382de2010-07-28 22:44:07 +00002015 // If this is a struct, recurse into the field at the specified offset.
Micah Villmow25a6a842012-10-08 16:25:52 +00002016 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
Chris Lattner49382de2010-07-28 22:44:07 +00002017 if (IROffset < SL->getSizeInBytes()) {
2018 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2019 IROffset -= SL->getElementOffset(FieldIdx);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002020
Chris Lattner0d2656d2010-07-29 17:40:35 +00002021 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2022 SourceTy, SourceOffset);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002023 }
Chris Lattner49382de2010-07-28 22:44:07 +00002024 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002025
Chris Lattner2acc6e32011-07-18 04:24:23 +00002026 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002027 llvm::Type *EltTy = ATy->getElementType();
Micah Villmow25a6a842012-10-08 16:25:52 +00002028 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
Chris Lattner021c3a32010-07-29 07:43:55 +00002029 unsigned EltOffset = IROffset/EltSize*EltSize;
Chris Lattner0d2656d2010-07-29 17:40:35 +00002030 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2031 SourceOffset);
Chris Lattner021c3a32010-07-29 07:43:55 +00002032 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002033
Chris Lattner49382de2010-07-28 22:44:07 +00002034 // Okay, we don't have any better idea of what to pass, so we pass this in an
2035 // integer register that isn't too big to fit the rest of the struct.
Chris Lattner9e45a3d2010-07-29 17:34:39 +00002036 unsigned TySizeInBytes =
2037 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
Chris Lattner49382de2010-07-28 22:44:07 +00002038
Chris Lattner9e45a3d2010-07-29 17:34:39 +00002039 assert(TySizeInBytes != SourceOffset && "Empty field?");
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002040
Chris Lattner49382de2010-07-28 22:44:07 +00002041 // It is always safe to classify this as an integer type up to i64 that
2042 // isn't larger than the structure.
Chris Lattner9e45a3d2010-07-29 17:34:39 +00002043 return llvm::IntegerType::get(getVMContext(),
2044 std::min(TySizeInBytes-SourceOffset, 8U)*8);
Chris Lattner9c254f02010-06-29 06:01:59 +00002045}
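// Worked example: for struct { double d; int i; } the call with IROffset == 8
// recurses into the i32 field and, since bits 96-128 of the source are tail
// padding, returns i32; for struct { double d; int a, b; } that check fails
// (b occupies bits 96-128), so the code falls through to the final case and
// uses i64 for the high part.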
2046
Chris Lattner66e7b682010-09-01 00:50:20 +00002047
2048/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2049/// be used as elements of a two register pair to pass or return, return a
2050/// first class aggregate to represent them. For example, if the low part of
2051/// a by-value argument should be passed as i32* and the high part as float,
2052/// return {i32*, float}.
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002053static llvm::Type *
Jay Foadef6de3d2011-07-11 09:56:20 +00002054GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
Micah Villmow25a6a842012-10-08 16:25:52 +00002055 const llvm::DataLayout &TD) {
Chris Lattner66e7b682010-09-01 00:50:20 +00002056 // In order to correctly satisfy the ABI, we need the high part to start
2057 // at offset 8. If the high and low parts we inferred are both 4-byte types
2058 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2059 // the second element at offset 8. Check for this:
2060 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2061 unsigned HiAlign = TD.getABITypeAlignment(Hi);
Micah Villmow25a6a842012-10-08 16:25:52 +00002062 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
Chris Lattner66e7b682010-09-01 00:50:20 +00002063 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
Michael J. Spencer9cac4942010-10-19 06:39:39 +00002064
Chris Lattner66e7b682010-09-01 00:50:20 +00002065 // To handle this, we have to increase the size of the low part so that the
2066 // second element will start at an 8 byte offset. We can't increase the size
2067 // of the second element because it might make us access off the end of the
2068 // struct.
2069 if (HiStart != 8) {
2070 // There are only two sorts of types the ABI generation code can produce for
2071 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2072 // Promote these to a larger type.
2073 if (Lo->isFloatTy())
2074 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2075 else {
2076 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2077 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2078 }
2079 }
Michael J. Spencer9cac4942010-10-19 06:39:39 +00002080
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002081 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
Michael J. Spencer9cac4942010-10-19 06:39:39 +00002082
2083
Chris Lattner66e7b682010-09-01 00:50:20 +00002084 // Verify that the second element is at an 8-byte offset.
2085 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2086 "Invalid x86-64 argument pair!");
2087 return Result;
2088}
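// For example, if both inferred halves were 4-byte types (say a float low part
// and a float high part), the float is promoted to double so that the struct
// layout places the high element at offset 8; without the promotion
// {float, float} would put it at offset 4.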
2089
Chris Lattner519f68c2010-07-28 23:06:14 +00002090ABIArgInfo X86_64ABIInfo::
Chris Lattnera3c109b2010-07-29 02:16:43 +00002091classifyReturnType(QualType RetTy) const {
Chris Lattner519f68c2010-07-28 23:06:14 +00002092 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2093 // classification algorithm.
2094 X86_64ABIInfo::Class Lo, Hi;
Eli Friedman7a1b5862013-06-12 00:13:45 +00002095 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
Chris Lattner519f68c2010-07-28 23:06:14 +00002096
2097 // Check some invariants.
2098 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
Chris Lattner519f68c2010-07-28 23:06:14 +00002099 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2100
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002101 llvm::Type *ResType = 0;
Chris Lattner519f68c2010-07-28 23:06:14 +00002102 switch (Lo) {
2103 case NoClass:
Chris Lattner117e3f42010-07-30 04:02:24 +00002104 if (Hi == NoClass)
2105 return ABIArgInfo::getIgnore();
2106 // If the low part is just padding, it takes no register, leave ResType
2107 // null.
2108 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2109 "Unknown missing lo part");
2110 break;
Chris Lattner519f68c2010-07-28 23:06:14 +00002111
2112 case SSEUp:
2113 case X87Up:
David Blaikieb219cfc2011-09-23 05:06:16 +00002114 llvm_unreachable("Invalid classification for lo word.");
Chris Lattner519f68c2010-07-28 23:06:14 +00002115
2116 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2117 // hidden argument.
2118 case Memory:
2119 return getIndirectReturnResult(RetTy);
2120
2121 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2122 // available register of the sequence %rax, %rdx is used.
2123 case Integer:
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002124 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002125
Chris Lattnereb518b42010-07-29 21:42:50 +00002126 // If we have a sign or zero extended integer, make sure to return Extend
2127 // so that the parameter gets the right LLVM IR attributes.
2128 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2129 // Treat an enum type as its underlying type.
2130 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2131 RetTy = EnumTy->getDecl()->getIntegerType();
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002132
Chris Lattnereb518b42010-07-29 21:42:50 +00002133 if (RetTy->isIntegralOrEnumerationType() &&
2134 RetTy->isPromotableIntegerType())
2135 return ABIArgInfo::getExtend();
2136 }
Chris Lattner519f68c2010-07-28 23:06:14 +00002137 break;
2138
2139 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2140 // available SSE register of the sequence %xmm0, %xmm1 is used.
2141 case SSE:
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002142 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
Chris Lattner0b30c672010-07-28 23:12:33 +00002143 break;
Chris Lattner519f68c2010-07-28 23:06:14 +00002144
2145 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2146 // returned on the X87 stack in %st0 as 80-bit x87 number.
2147 case X87:
Chris Lattnerea044322010-07-29 02:01:43 +00002148 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
Chris Lattner0b30c672010-07-28 23:12:33 +00002149 break;
Chris Lattner519f68c2010-07-28 23:06:14 +00002150
2151 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2152 // part of the value is returned in %st0 and the imaginary part in
2153 // %st1.
2154 case ComplexX87:
2155 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
Chris Lattner7650d952011-06-18 22:49:11 +00002156 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
Chris Lattnerea044322010-07-29 02:01:43 +00002157 llvm::Type::getX86_FP80Ty(getVMContext()),
Chris Lattner519f68c2010-07-28 23:06:14 +00002158 NULL);
2159 break;
2160 }
2161
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002162 llvm::Type *HighPart = 0;
Chris Lattner519f68c2010-07-28 23:06:14 +00002163 switch (Hi) {
2164 // Memory was handled previously and X87 should
2165 // never occur as a hi class.
2166 case Memory:
2167 case X87:
David Blaikieb219cfc2011-09-23 05:06:16 +00002168 llvm_unreachable("Invalid classification for hi word.");
Chris Lattner519f68c2010-07-28 23:06:14 +00002169
2170 case ComplexX87: // Previously handled.
Chris Lattner0b30c672010-07-28 23:12:33 +00002171 case NoClass:
2172 break;
Chris Lattner519f68c2010-07-28 23:06:14 +00002173
Chris Lattner3db4dde2010-09-01 00:20:33 +00002174 case Integer:
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002175 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
Chris Lattner3db4dde2010-09-01 00:20:33 +00002176 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2177 return ABIArgInfo::getDirect(HighPart, 8);
Chris Lattner519f68c2010-07-28 23:06:14 +00002178 break;
Chris Lattner3db4dde2010-09-01 00:20:33 +00002179 case SSE:
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002180 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
Chris Lattner3db4dde2010-09-01 00:20:33 +00002181 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2182 return ABIArgInfo::getDirect(HighPart, 8);
Chris Lattner519f68c2010-07-28 23:06:14 +00002183 break;
2184
2185 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00002186 // is passed in the next available eightbyte chunk of the last used
2187 // vector register.
Chris Lattner519f68c2010-07-28 23:06:14 +00002188 //
Chris Lattnerfc8f0e12011-04-15 05:22:18 +00002189 // SSEUP should always be preceded by SSE, just widen.
Chris Lattner519f68c2010-07-28 23:06:14 +00002190 case SSEUp:
2191 assert(Lo == SSE && "Unexpected SSEUp classification.");
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00002192 ResType = GetByteVectorType(RetTy);
Chris Lattner519f68c2010-07-28 23:06:14 +00002193 break;
2194
2195 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2196 // returned together with the previous X87 value in %st0.
2197 case X87Up:
Chris Lattnerfc8f0e12011-04-15 05:22:18 +00002198 // If X87Up is preceded by X87, we don't need to do
Chris Lattner519f68c2010-07-28 23:06:14 +00002199 // anything. However, in some cases with unions it may not be
Chris Lattnerfc8f0e12011-04-15 05:22:18 +00002200 // preceded by X87. In such situations we follow gcc and pass the
Chris Lattner519f68c2010-07-28 23:06:14 +00002201 // extra bits in an SSE reg.
Chris Lattner603519d2010-07-29 17:49:08 +00002202 if (Lo != X87) {
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002203 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
Chris Lattner3db4dde2010-09-01 00:20:33 +00002204 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2205 return ABIArgInfo::getDirect(HighPart, 8);
Chris Lattner603519d2010-07-29 17:49:08 +00002206 }
Chris Lattner519f68c2010-07-28 23:06:14 +00002207 break;
2208 }
Michael J. Spencer9cac4942010-10-19 06:39:39 +00002209
Chris Lattner3db4dde2010-09-01 00:20:33 +00002210 // If a high part was specified, merge it together with the low part. It is
Chris Lattner645406a2010-09-01 00:24:35 +00002211 // known to pass in the high eightbyte of the result. We do this by forming a
2212 // first class struct aggregate with the high and low part: {low, high}
Chris Lattner66e7b682010-09-01 00:50:20 +00002213 if (HighPart)
Micah Villmow25a6a842012-10-08 16:25:52 +00002214 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
Chris Lattner519f68c2010-07-28 23:06:14 +00002215
Chris Lattnereb518b42010-07-29 21:42:50 +00002216 return ABIArgInfo::getDirect(ResType);
Chris Lattner519f68c2010-07-28 23:06:14 +00002217}
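// Illustrative sketch (not from the ABI document): for a return type such as
//   struct S { double d; int i; };
// the classification above gives Lo = SSE and Hi = Integer, so the double
// comes back in %xmm0 and the int in %rax, and the IR return type becomes
// the first-class pair { double, i32 } built by GetX86_64ByValArgumentPair.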
2218
Daniel Dunbaredfac032012-03-10 01:03:58 +00002219ABIArgInfo X86_64ABIInfo::classifyArgumentType(
Eli Friedman7a1b5862013-06-12 00:13:45 +00002220 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
2221 bool isNamedArg)
Daniel Dunbaredfac032012-03-10 01:03:58 +00002222 const
2223{
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002224 X86_64ABIInfo::Class Lo, Hi;
Eli Friedman7a1b5862013-06-12 00:13:45 +00002225 classify(Ty, 0, Lo, Hi, isNamedArg);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002226
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002227 // Check some invariants.
2228 // FIXME: Enforce these by construction.
2229 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002230 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2231
2232 neededInt = 0;
2233 neededSSE = 0;
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002234 llvm::Type *ResType = 0;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002235 switch (Lo) {
2236 case NoClass:
Chris Lattner117e3f42010-07-30 04:02:24 +00002237 if (Hi == NoClass)
2238 return ABIArgInfo::getIgnore();
2239 // If the low part is just padding, it takes no register, leave ResType
2240 // null.
2241 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2242 "Unknown missing lo part");
2243 break;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002244
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002245 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2246 // on the stack.
2247 case Memory:
2248
2249 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2250 // COMPLEX_X87, it is passed in memory.
2251 case X87:
2252 case ComplexX87:
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00002253 if (getRecordArgABI(Ty, CGT) == CGCXXABI::RAA_Indirect)
Eli Friedmanded137f2011-06-29 07:04:55 +00002254 ++neededInt;
Daniel Dunbaredfac032012-03-10 01:03:58 +00002255 return getIndirectResult(Ty, freeIntRegs);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002256
2257 case SSEUp:
2258 case X87Up:
David Blaikieb219cfc2011-09-23 05:06:16 +00002259 llvm_unreachable("Invalid classification for lo word.");
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002260
2261 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2262 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2263 // and %r9 is used.
2264 case Integer:
Chris Lattner9c254f02010-06-29 06:01:59 +00002265 ++neededInt;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002266
Chris Lattner49382de2010-07-28 22:44:07 +00002267 // Pick an 8-byte type based on the preferred type.
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002268 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
Chris Lattnereb518b42010-07-29 21:42:50 +00002269
2270 // If we have a sign or zero extended integer, make sure to return Extend
2271 // so that the parameter gets the right LLVM IR attributes.
2272 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2273 // Treat an enum type as its underlying type.
2274 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2275 Ty = EnumTy->getDecl()->getIntegerType();
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002276
Chris Lattnereb518b42010-07-29 21:42:50 +00002277 if (Ty->isIntegralOrEnumerationType() &&
2278 Ty->isPromotableIntegerType())
2279 return ABIArgInfo::getExtend();
2280 }
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002281
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002282 break;
2283
2284 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2285 // available SSE register is used, the registers are taken in the
2286 // order from %xmm0 to %xmm7.
Bill Wendlingbb465d72010-10-18 03:41:31 +00002287 case SSE: {
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002288 llvm::Type *IRType = CGT.ConvertType(Ty);
Eli Friedman14508ff2011-07-02 00:57:27 +00002289 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
Bill Wendling99aaae82010-10-18 23:51:38 +00002290 ++neededSSE;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002291 break;
2292 }
Bill Wendlingbb465d72010-10-18 03:41:31 +00002293 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002294
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002295 llvm::Type *HighPart = 0;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002296 switch (Hi) {
2297 // Memory was handled previously, ComplexX87 and X87 should
Chris Lattnerfc8f0e12011-04-15 05:22:18 +00002298 // never occur as hi classes, and X87Up must be preceded by X87,
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002299 // which is passed in memory.
2300 case Memory:
2301 case X87:
2302 case ComplexX87:
David Blaikieb219cfc2011-09-23 05:06:16 +00002303 llvm_unreachable("Invalid classification for hi word.");
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002304
2305 case NoClass: break;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002306
Chris Lattner645406a2010-09-01 00:24:35 +00002307 case Integer:
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002308 ++neededInt;
Chris Lattner49382de2010-07-28 22:44:07 +00002309 // Pick an 8-byte type based on the preferred type.
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002310 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002311
Chris Lattner645406a2010-09-01 00:24:35 +00002312 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
2313 return ABIArgInfo::getDirect(HighPart, 8);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002314 break;
2315
2316 // X87Up generally doesn't occur here (long double is passed in
2317 // memory), except in situations involving unions.
2318 case X87Up:
Chris Lattner645406a2010-09-01 00:24:35 +00002319 case SSE:
Chris Lattner9cbe4f02011-07-09 17:41:47 +00002320 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002321
Chris Lattner645406a2010-09-01 00:24:35 +00002322 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
2323 return ABIArgInfo::getDirect(HighPart, 8);
Chris Lattner117e3f42010-07-30 04:02:24 +00002324
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002325 ++neededSSE;
2326 break;
2327
2328 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2329 // eightbyte is passed in the upper half of the last used SSE
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002330 // register. This only happens when 128-bit vectors are passed.
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002331 case SSEUp:
Chris Lattnerab5722e2010-07-28 23:47:21 +00002332 assert(Lo == SSE && "Unexpected SSEUp classification");
Bruno Cardoso Lopes4943c152011-07-11 22:41:29 +00002333 ResType = GetByteVectorType(Ty);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002334 break;
2335 }
2336
Chris Lattner645406a2010-09-01 00:24:35 +00002337 // If a high part was specified, merge it together with the low part. It is
2338 // known to pass in the high eightbyte of the result. We do this by forming a
2339 // first class struct aggregate with the high and low part: {low, high}
2340 if (HighPart)
Micah Villmow25a6a842012-10-08 16:25:52 +00002341 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
Michael J. Spencer9cac4942010-10-19 06:39:39 +00002342
Chris Lattnereb518b42010-07-29 21:42:50 +00002343 return ABIArgInfo::getDirect(ResType);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002344}
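// Illustrative sketch: an argument of type
//   struct P { long a; long b; };
// classifies both eightbytes as INTEGER, so neededInt becomes 2 and the
// value is coerced to { i64, i64 }, taking two registers from the
// %rdi, %rsi, %rdx, %rcx, %r8, %r9 sequence; an ordinary field struct
// larger than 16 bytes is instead classified MEMORY and handed to
// getIndirectResult above.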
2345
Chris Lattneree5dcd02010-07-29 02:31:05 +00002346void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002347
Chris Lattnera3c109b2010-07-29 02:16:43 +00002348 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002349
2350 // Keep track of the number of assigned registers.
Bill Wendling99aaae82010-10-18 23:51:38 +00002351 unsigned freeIntRegs = 6, freeSSERegs = 8;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002352
2353 // If the return value is indirect, then the hidden argument is consuming one
2354 // integer register.
2355 if (FI.getReturnInfo().isIndirect())
2356 --freeIntRegs;
2357
Eli Friedman7a1b5862013-06-12 00:13:45 +00002358 bool isVariadic = FI.isVariadic();
2359 unsigned numRequiredArgs = 0;
2360 if (isVariadic)
2361 numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs();
2362
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002363 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2364 // get assigned (in left-to-right order) for passing as follows...
2365 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2366 it != ie; ++it) {
Eli Friedman7a1b5862013-06-12 00:13:45 +00002367 bool isNamedArg = true;
2368 if (isVariadic)
Aaron Ballmaneba7d2f2013-06-12 15:03:45 +00002369 isNamedArg = (it - FI.arg_begin()) <
2370 static_cast<signed>(numRequiredArgs);
Eli Friedman7a1b5862013-06-12 00:13:45 +00002371
Bill Wendling99aaae82010-10-18 23:51:38 +00002372 unsigned neededInt, neededSSE;
Daniel Dunbaredfac032012-03-10 01:03:58 +00002373 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
Eli Friedman7a1b5862013-06-12 00:13:45 +00002374 neededSSE, isNamedArg);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002375
2376 // AMD64-ABI 3.2.3p3: If there are no registers available for any
2377 // eightbyte of an argument, the whole argument is passed on the
2378 // stack. If registers have already been assigned for some
2379 // eightbytes of such an argument, the assignments get reverted.
Bill Wendling99aaae82010-10-18 23:51:38 +00002380 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002381 freeIntRegs -= neededInt;
2382 freeSSERegs -= neededSSE;
2383 } else {
Daniel Dunbaredfac032012-03-10 01:03:58 +00002384 it->info = getIndirectResult(it->type, freeIntRegs);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002385 }
2386 }
2387}
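// Sketch of the reversion rule above: if earlier arguments have already used
// all six integer registers, a later two-eightbyte INTEGER struct has
// neededInt == 2 > freeIntRegs == 0, so its whole value falls back to
// getIndirectResult and is passed in memory rather than being split between
// registers and the stack.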
2388
2389static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
2390 QualType Ty,
2391 CodeGenFunction &CGF) {
2392 llvm::Value *overflow_arg_area_p =
2393 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2394 llvm::Value *overflow_arg_area =
2395 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2396
2397 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2398 // byte boundary if alignment needed by type exceeds 8 byte boundary.
Eli Friedman8d2fe422011-11-18 02:44:19 +00002399 // It isn't stated explicitly in the standard, but in practice we use
2400 // alignment greater than 16 where necessary.
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002401 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
2402 if (Align > 8) {
Eli Friedman8d2fe422011-11-18 02:44:19 +00002403 // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
Owen Anderson0032b272009-08-13 21:57:51 +00002404 llvm::Value *Offset =
Eli Friedman8d2fe422011-11-18 02:44:19 +00002405 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002406 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
2407 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
Chris Lattner77b89b82010-06-27 07:15:29 +00002408 CGF.Int64Ty);
Eli Friedman8d2fe422011-11-18 02:44:19 +00002409 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002410 overflow_arg_area =
2411 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
2412 overflow_arg_area->getType(),
2413 "overflow_arg_area.align");
2414 }
2415
2416 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
Chris Lattner2acc6e32011-07-18 04:24:23 +00002417 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002418 llvm::Value *Res =
2419 CGF.Builder.CreateBitCast(overflow_arg_area,
Owen Anderson96e0fc72009-07-29 22:16:19 +00002420 llvm::PointerType::getUnqual(LTy));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002421
2422 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2423 // l->overflow_arg_area + sizeof(type).
2424 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2425 // an 8 byte boundary.
2426
2427 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
Owen Anderson0032b272009-08-13 21:57:51 +00002428 llvm::Value *Offset =
Chris Lattner77b89b82010-06-27 07:15:29 +00002429 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002430 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2431 "overflow_arg_area.next");
2432 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2433
2434 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2435 return Res;
2436}
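// Sketch of the overflow-area handling above for an over-aligned type: a
// 32-byte-aligned argument first rounds overflow_arg_area up with
// (p + 31) & -32, the value is fetched from there, and the pointer is then
// advanced by the type size rounded up to the next multiple of 8.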
2437
2438llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2439 CodeGenFunction &CGF) const {
2440 // Assume that va_list type is correct; should be pointer to LLVM type:
2441 // struct {
2442 // i32 gp_offset;
2443 // i32 fp_offset;
2444 // i8* overflow_arg_area;
2445 // i8* reg_save_area;
2446 // };
Bill Wendling99aaae82010-10-18 23:51:38 +00002447 unsigned neededInt, neededSSE;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002448
Chris Lattnera14db752010-03-11 18:19:55 +00002449 Ty = CGF.getContext().getCanonicalType(Ty);
Eli Friedman7a1b5862013-06-12 00:13:45 +00002450 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
2451 /*isNamedArg*/false);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002452
2453 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2454 // in the registers. If not go to step 7.
2455 if (!neededInt && !neededSSE)
2456 return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2457
2458 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2459 // general purpose registers needed to pass type and num_fp to hold
2460 // the number of floating point registers needed.
2461
2462 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2463 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2464 // l->fp_offset > 304 - num_fp * 16 go to step 7.
2465 //
2466 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
2467 // register save space.
2468
2469 llvm::Value *InRegs = 0;
2470 llvm::Value *gp_offset_p = 0, *gp_offset = 0;
2471 llvm::Value *fp_offset_p = 0, *fp_offset = 0;
2472 if (neededInt) {
2473 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2474 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
Chris Lattner1090a9b2010-06-28 21:43:59 +00002475 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2476 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002477 }
2478
2479 if (neededSSE) {
2480 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2481 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2482 llvm::Value *FitsInFP =
Chris Lattner1090a9b2010-06-28 21:43:59 +00002483 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2484 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002485 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2486 }
2487
2488 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2489 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2490 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2491 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2492
2493 // Emit code to load the value if it was passed in registers.
2494
2495 CGF.EmitBlock(InRegBlock);
2496
2497 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2498 // an offset of l->gp_offset and/or l->fp_offset. This may require
2499 // copying to a temporary location in case the parameter is passed
2500 // in different register classes or requires an alignment greater
2501 // than 8 for general purpose registers and 16 for XMM registers.
2502 //
2503 // FIXME: This really results in shameful code when we end up needing to
2504 // collect arguments from different places; often what should result in a
2505 // simple assembling of a structure from scattered addresses has many more
2506 // loads than necessary. Can we clean this up?
Chris Lattner2acc6e32011-07-18 04:24:23 +00002507 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002508 llvm::Value *RegAddr =
2509 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2510 "reg_save_area");
2511 if (neededInt && neededSSE) {
2512 // FIXME: Cleanup.
Chris Lattner800588f2010-07-29 06:26:06 +00002513 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
Chris Lattner2acc6e32011-07-18 04:24:23 +00002514 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
Eli Friedmaneeb00622013-06-07 23:20:55 +00002515 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2516 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002517 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
Chris Lattner2acc6e32011-07-18 04:24:23 +00002518 llvm::Type *TyLo = ST->getElementType(0);
2519 llvm::Type *TyHi = ST->getElementType(1);
Chris Lattnera8b7a7d2010-08-26 06:28:35 +00002520 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002521 "Unexpected ABI info for mixed regs");
Chris Lattner2acc6e32011-07-18 04:24:23 +00002522 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2523 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002524 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2525 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
Duncan Sandsf177d9d2010-02-15 16:14:01 +00002526 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
2527 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002528 llvm::Value *V =
2529 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2530 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2531 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2532 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2533
Owen Andersona1cf15f2009-07-14 23:10:40 +00002534 RegAddr = CGF.Builder.CreateBitCast(Tmp,
Owen Anderson96e0fc72009-07-29 22:16:19 +00002535 llvm::PointerType::getUnqual(LTy));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002536 } else if (neededInt) {
2537 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2538 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
Owen Anderson96e0fc72009-07-29 22:16:19 +00002539 llvm::PointerType::getUnqual(LTy));
Eli Friedmaneeb00622013-06-07 23:20:55 +00002540
2541 // Copy to a temporary if necessary to ensure the appropriate alignment.
2542 std::pair<CharUnits, CharUnits> SizeAlign =
2543 CGF.getContext().getTypeInfoInChars(Ty);
2544 uint64_t TySize = SizeAlign.first.getQuantity();
2545 unsigned TyAlign = SizeAlign.second.getQuantity();
2546 if (TyAlign > 8) {
Eli Friedmaneeb00622013-06-07 23:20:55 +00002547 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2548 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
2549 RegAddr = Tmp;
2550 }
Chris Lattnerdce5ad02010-06-28 20:05:43 +00002551 } else if (neededSSE == 1) {
2552 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2553 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2554 llvm::PointerType::getUnqual(LTy));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002555 } else {
Chris Lattnerdce5ad02010-06-28 20:05:43 +00002556 assert(neededSSE == 2 && "Invalid number of needed registers!");
2557 // SSE registers are spaced 16 bytes apart in the register save
2558 // area, we need to collect the two eightbytes together.
2559 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
Chris Lattner1090a9b2010-06-28 21:43:59 +00002560 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
Chris Lattner8b418682012-02-07 00:39:47 +00002561 llvm::Type *DoubleTy = CGF.DoubleTy;
Chris Lattner2acc6e32011-07-18 04:24:23 +00002562 llvm::Type *DblPtrTy =
Chris Lattnerdce5ad02010-06-28 20:05:43 +00002563 llvm::PointerType::getUnqual(DoubleTy);
Eli Friedmaneeb00622013-06-07 23:20:55 +00002564 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL);
2565 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
2566 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
Chris Lattnerdce5ad02010-06-28 20:05:43 +00002567 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2568 DblPtrTy));
2569 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2570 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2571 DblPtrTy));
2572 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2573 RegAddr = CGF.Builder.CreateBitCast(Tmp,
2574 llvm::PointerType::getUnqual(LTy));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002575 }
2576
2577 // AMD64-ABI 3.5.7p5: Step 5. Set:
2578 // l->gp_offset = l->gp_offset + num_gp * 8
2579 // l->fp_offset = l->fp_offset + num_fp * 16.
2580 if (neededInt) {
Chris Lattner77b89b82010-06-27 07:15:29 +00002581 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002582 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2583 gp_offset_p);
2584 }
2585 if (neededSSE) {
Chris Lattner77b89b82010-06-27 07:15:29 +00002586 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002587 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2588 fp_offset_p);
2589 }
2590 CGF.EmitBranch(ContBlock);
2591
2592 // Emit code to load the value if it was passed in memory.
2593
2594 CGF.EmitBlock(InMemBlock);
2595 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2596
2597 // Return the appropriate result.
2598
2599 CGF.EmitBlock(ContBlock);
Jay Foadbbf3bac2011-03-30 11:28:58 +00002600 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002601 "vaarg.addr");
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002602 ResAddr->addIncoming(RegAddr, InRegBlock);
2603 ResAddr->addIncoming(MemAddr, InMemBlock);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00002604 return ResAddr;
2605}
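// Rough picture of the code emitted above for va_arg(ap, int), shown as a
// C-level sketch rather than the exact IR:
//   if (ap->gp_offset <= 48 - 8) {
//     addr = ap->reg_save_area + ap->gp_offset;
//     ap->gp_offset += 8;
//   } else {
//     addr = ap->overflow_arg_area;
//     ap->overflow_arg_area += 8;
//   }
//   result = *(int *)addr;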
2606
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00002607ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
NAKAMURA Takumia7573222011-01-17 22:56:31 +00002608
2609 if (Ty->isVoidType())
2610 return ABIArgInfo::getIgnore();
2611
2612 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2613 Ty = EnumTy->getDecl()->getIntegerType();
2614
2615 uint64_t Size = getContext().getTypeSize(Ty);
2616
2617 if (const RecordType *RT = Ty->getAs<RecordType>()) {
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00002618 if (IsReturnType) {
2619 if (isRecordReturnIndirect(RT, CGT))
2620 return ABIArgInfo::getIndirect(0, false);
2621 } else {
2622 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, CGT))
2623 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2624 }
2625
2626 if (RT->getDecl()->hasFlexibleArrayMember())
NAKAMURA Takumia7573222011-01-17 22:56:31 +00002627 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2628
NAKAMURA Takumi6f174332011-02-22 03:56:57 +00002629 // FIXME: mingw-w64-gcc emits 128-bit struct as i128
John McCall64aa4b32013-04-16 22:48:15 +00002630 if (Size == 128 && getTarget().getTriple().getOS() == llvm::Triple::MinGW32)
NAKAMURA Takumi6f174332011-02-22 03:56:57 +00002631 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2632 Size));
2633
2634 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2635 // not 1, 2, 4, or 8 bytes, must be passed by reference."
2636 if (Size <= 64 &&
NAKAMURA Takumiff8be0e2011-01-19 00:11:33 +00002637 (Size & (Size - 1)) == 0)
NAKAMURA Takumia7573222011-01-17 22:56:31 +00002638 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2639 Size));
2640
2641 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2642 }
2643
2644 if (Ty->isPromotableIntegerType())
2645 return ABIArgInfo::getExtend();
2646
2647 return ABIArgInfo::getDirect();
2648}
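// Illustrative examples of the MS x64 size rule above (a sketch):
//   struct A { int x; int y; };   // 8 bytes, power of two -> direct as i64
//   struct B { char c[3]; };      // 3 bytes, not 1/2/4/8   -> passed by reference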
2649
2650void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2651
2652 QualType RetTy = FI.getReturnType();
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00002653 FI.getReturnInfo() = classify(RetTy, true);
NAKAMURA Takumia7573222011-01-17 22:56:31 +00002654
NAKAMURA Takumia7573222011-01-17 22:56:31 +00002655 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2656 it != ie; ++it)
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00002657 it->info = classify(it->type, false);
NAKAMURA Takumia7573222011-01-17 22:56:31 +00002658}
2659
Chris Lattnerf13721d2010-08-31 16:44:54 +00002660llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2661 CodeGenFunction &CGF) const {
Chris Lattner8b418682012-02-07 00:39:47 +00002662 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Chris Lattnerdce5ad02010-06-28 20:05:43 +00002663
Chris Lattnerf13721d2010-08-31 16:44:54 +00002664 CGBuilderTy &Builder = CGF.Builder;
2665 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2666 "ap");
2667 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2668 llvm::Type *PTy =
2669 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2670 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2671
2672 uint64_t Offset =
2673 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2674 llvm::Value *NextAddr =
2675 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2676 "ap.next");
2677 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2678
2679 return AddrTyped;
2680}
Chris Lattnerdce5ad02010-06-28 20:05:43 +00002681
Benjamin Kramerc6f84cf2012-10-20 13:02:06 +00002682namespace {
2683
Derek Schuff263366f2012-10-16 22:30:41 +00002684class NaClX86_64ABIInfo : public ABIInfo {
2685 public:
2686 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2687 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
2688 virtual void computeInfo(CGFunctionInfo &FI) const;
2689 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2690 CodeGenFunction &CGF) const;
2691 private:
2692 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
2693 X86_64ABIInfo NInfo; // Used for everything else.
2694};
2695
2696class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2697 public:
2698 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2699 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2700};
2701
Benjamin Kramerc6f84cf2012-10-20 13:02:06 +00002702}
2703
Derek Schuff263366f2012-10-16 22:30:41 +00002704void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2705 if (FI.getASTCallingConvention() == CC_PnaclCall)
2706 PInfo.computeInfo(FI);
2707 else
2708 NInfo.computeInfo(FI);
2709}
2710
2711llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2712 CodeGenFunction &CGF) const {
2713 // Always use the native convention; calling pnacl-style varargs functions
2714 // is unsupported.
2715 return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2716}
2717
2718
John McCallec853ba2010-03-11 00:10:12 +00002719// PowerPC-32
2720
2721namespace {
2722class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2723public:
Chris Lattnerea044322010-07-29 02:01:43 +00002724 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002725
John McCallec853ba2010-03-11 00:10:12 +00002726 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2727 // This is recovered from gcc output.
2728 return 1; // r1 is the dedicated stack pointer
2729 }
2730
2731 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002732 llvm::Value *Address) const;
John McCallec853ba2010-03-11 00:10:12 +00002733};
2734
2735}
2736
2737bool
2738PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2739 llvm::Value *Address) const {
2740 // This is calculated from the LLVM and GCC tables and verified
2741 // against gcc output. AFAIK all ABIs use the same encoding.
2742
2743 CodeGen::CGBuilderTy &Builder = CGF.Builder;
John McCallec853ba2010-03-11 00:10:12 +00002744
Chris Lattner8b418682012-02-07 00:39:47 +00002745 llvm::IntegerType *i8 = CGF.Int8Ty;
John McCallec853ba2010-03-11 00:10:12 +00002746 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2747 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2748 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2749
2750 // 0-31: r0-31, the 4-byte general-purpose registers
John McCallaeeb7012010-05-27 06:19:26 +00002751 AssignToArrayRange(Builder, Address, Four8, 0, 31);
John McCallec853ba2010-03-11 00:10:12 +00002752
2753 // 32-63: fp0-31, the 8-byte floating-point registers
John McCallaeeb7012010-05-27 06:19:26 +00002754 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
John McCallec853ba2010-03-11 00:10:12 +00002755
2756 // 64-76 are various 4-byte special-purpose registers:
2757 // 64: mq
2758 // 65: lr
2759 // 66: ctr
2760 // 67: ap
2761 // 68-75 cr0-7
2762 // 76: xer
John McCallaeeb7012010-05-27 06:19:26 +00002763 AssignToArrayRange(Builder, Address, Four8, 64, 76);
John McCallec853ba2010-03-11 00:10:12 +00002764
2765 // 77-108: v0-31, the 16-byte vector registers
John McCallaeeb7012010-05-27 06:19:26 +00002766 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
John McCallec853ba2010-03-11 00:10:12 +00002767
2768 // 109: vrsave
2769 // 110: vscr
2770 // 111: spe_acc
2771 // 112: spefscr
2772 // 113: sfp
John McCallaeeb7012010-05-27 06:19:26 +00002773 AssignToArrayRange(Builder, Address, Four8, 109, 113);
John McCallec853ba2010-03-11 00:10:12 +00002774
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00002775 return false;
John McCallec853ba2010-03-11 00:10:12 +00002776}
2777
Roman Divacky0fbc4b92012-05-09 18:22:46 +00002778// PowerPC-64
2779
2780namespace {
Bill Schmidt2fc107f2012-10-03 19:18:57 +00002781/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
2782class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
2783
2784public:
2785 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
2786
Ulrich Weigand71c0dcc2012-11-05 19:13:42 +00002787 bool isPromotableTypeForABI(QualType Ty) const;
2788
2789 ABIArgInfo classifyReturnType(QualType RetTy) const;
2790 ABIArgInfo classifyArgumentType(QualType Ty) const;
2791
Bill Schmidtb1f5fe02012-10-12 19:26:17 +00002792 // TODO: We can add more logic to computeInfo to improve performance.
2793 // Example: For aggregate arguments that fit in a register, we could
2794 // use getDirectInReg (as is done below for structs containing a single
2795 // floating-point value) to avoid pushing them to memory on function
2796 // entry. This would require changing the logic in PPCISelLowering
2797 // when lowering the parameters in the caller and args in the callee.
2798 virtual void computeInfo(CGFunctionInfo &FI) const {
2799 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2800 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2801 it != ie; ++it) {
2802 // We rely on the default argument classification for the most part.
2803 // One exception: An aggregate containing a single floating-point
Bill Schmidtb1993102013-07-23 22:15:57 +00002804 // or vector item must be passed in a register if one is available.
Bill Schmidtb1f5fe02012-10-12 19:26:17 +00002805 const Type *T = isSingleElementStruct(it->type, getContext());
2806 if (T) {
2807 const BuiltinType *BT = T->getAs<BuiltinType>();
Bill Schmidtb1993102013-07-23 22:15:57 +00002808 if (T->isVectorType() || (BT && BT->isFloatingPoint())) {
Bill Schmidtb1f5fe02012-10-12 19:26:17 +00002809 QualType QT(T, 0);
2810 it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
2811 continue;
2812 }
2813 }
2814 it->info = classifyArgumentType(it->type);
2815 }
2816 }
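// Example of the single-element special case above (a sketch): for
//   struct W { double d; };
// isSingleElementStruct() returns the double, so the argument is passed as
// ABIArgInfo::getDirectInReg(double) rather than going indirect through
// classifyArgumentType().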
Bill Schmidt2fc107f2012-10-03 19:18:57 +00002817
2818 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr,
2819 QualType Ty,
2820 CodeGenFunction &CGF) const;
2821};
2822
2823class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
2824public:
2825 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
2826 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
2827
2828 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2829 // This is recovered from gcc output.
2830 return 1; // r1 is the dedicated stack pointer
2831 }
2832
2833 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2834 llvm::Value *Address) const;
2835};
2836
Roman Divacky0fbc4b92012-05-09 18:22:46 +00002837class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2838public:
2839 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2840
2841 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
2842 // This is recovered from gcc output.
2843 return 1; // r1 is the dedicated stack pointer
2844 }
2845
2846 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2847 llvm::Value *Address) const;
2848};
2849
2850}
2851
Ulrich Weigand71c0dcc2012-11-05 19:13:42 +00002852// Return true if the ABI requires Ty to be passed sign- or zero-
2853// extended to 64 bits.
2854bool
2855PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
2856 // Treat an enum type as its underlying type.
2857 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2858 Ty = EnumTy->getDecl()->getIntegerType();
2859
2860 // Promotable integer types are required to be promoted by the ABI.
2861 if (Ty->isPromotableIntegerType())
2862 return true;
2863
2864 // In addition to the usual promotable integer types, we also need to
2865 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
2866 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2867 switch (BT->getKind()) {
2868 case BuiltinType::Int:
2869 case BuiltinType::UInt:
2870 return true;
2871 default:
2872 break;
2873 }
2874
2875 return false;
2876}
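// Consequence of the extra 32-bit case above (a sketch): a plain 'int' or
// 'unsigned' parameter is reported as promotable, so it ends up as
// ABIArgInfo::getExtend() and carries a signext/zeroext attribute in the IR,
// giving the callee a full 64-bit doubleword.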
2877
2878ABIArgInfo
2879PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
Bill Schmidtc9715fc2012-11-27 02:46:43 +00002880 if (Ty->isAnyComplexType())
2881 return ABIArgInfo::getDirect();
2882
Ulrich Weigand71c0dcc2012-11-05 19:13:42 +00002883 if (isAggregateTypeForABI(Ty)) {
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00002884 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
2885 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Ulrich Weigand71c0dcc2012-11-05 19:13:42 +00002886
2887 return ABIArgInfo::getIndirect(0);
2888 }
2889
2890 return (isPromotableTypeForABI(Ty) ?
2891 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2892}
2893
2894ABIArgInfo
2895PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
2896 if (RetTy->isVoidType())
2897 return ABIArgInfo::getIgnore();
2898
Bill Schmidt9e6111a2012-12-17 04:20:17 +00002899 if (RetTy->isAnyComplexType())
2900 return ABIArgInfo::getDirect();
2901
Ulrich Weigand71c0dcc2012-11-05 19:13:42 +00002902 if (isAggregateTypeForABI(RetTy))
2903 return ABIArgInfo::getIndirect(0);
2904
2905 return (isPromotableTypeForABI(RetTy) ?
2906 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2907}
2908
Bill Schmidt2fc107f2012-10-03 19:18:57 +00002909// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
2910llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
2911 QualType Ty,
2912 CodeGenFunction &CGF) const {
2913 llvm::Type *BP = CGF.Int8PtrTy;
2914 llvm::Type *BPP = CGF.Int8PtrPtrTy;
2915
2916 CGBuilderTy &Builder = CGF.Builder;
2917 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
2918 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2919
Bill Schmidt19f8e852013-01-14 17:45:36 +00002920 // Update the va_list pointer. The pointer should be bumped by the
2921 // size of the object. We can trust getTypeSize() except for a complex
2922 // type whose base type is smaller than a doubleword. For these, the
2923 // size of the object is 16 bytes; see below for further explanation.
Bill Schmidt2fc107f2012-10-03 19:18:57 +00002924 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
Bill Schmidt19f8e852013-01-14 17:45:36 +00002925 QualType BaseTy;
2926 unsigned CplxBaseSize = 0;
2927
2928 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
2929 BaseTy = CTy->getElementType();
2930 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
2931 if (CplxBaseSize < 8)
2932 SizeInBytes = 16;
2933 }
2934
Bill Schmidt2fc107f2012-10-03 19:18:57 +00002935 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
2936 llvm::Value *NextAddr =
2937 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
2938 "ap.next");
2939 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2940
Bill Schmidt19f8e852013-01-14 17:45:36 +00002941 // If we have a complex type and the base type is smaller than 8 bytes,
2942 // the ABI calls for the real and imaginary parts to be right-adjusted
2943 // in separate doublewords. However, Clang expects us to produce a
2944 // pointer to a structure with the two parts packed tightly. So generate
2945 // loads of the real and imaginary parts relative to the va_list pointer,
2946 // and store them to a temporary structure.
2947 if (CplxBaseSize && CplxBaseSize < 8) {
2948 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
2949 llvm::Value *ImagAddr = RealAddr;
2950 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
2951 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
2952 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
2953 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
2954 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
2955 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
2956 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
2957 llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
2958 "vacplx");
2959 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
2960 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
2961 Builder.CreateStore(Real, RealPtr, false);
2962 Builder.CreateStore(Imag, ImagPtr, false);
2963 return Ptr;
2964 }
2965
Bill Schmidt2fc107f2012-10-03 19:18:57 +00002966 // If the argument is smaller than 8 bytes, it is right-adjusted in
2967 // its doubleword slot. Adjust the pointer to pick it up from the
2968 // correct offset.
2969 if (SizeInBytes < 8) {
2970 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
2971 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
2972 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
2973 }
2974
2975 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2976 return Builder.CreateBitCast(Addr, PTy);
2977}
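// Example of the right-adjustment above (a sketch, assuming the big-endian
// 64-bit ELF ABI): for va_arg(ap, short) the two meaningful bytes sit at the
// end of the 8-byte slot, so the returned pointer is the slot address plus 6
// while ap itself still advances by a full doubleword.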
2978
2979static bool
2980PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2981 llvm::Value *Address) {
Roman Divacky0fbc4b92012-05-09 18:22:46 +00002982 // This is calculated from the LLVM and GCC tables and verified
2983 // against gcc output. AFAIK all ABIs use the same encoding.
2984
2985 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2986
2987 llvm::IntegerType *i8 = CGF.Int8Ty;
2988 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2989 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2990 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2991
2992 // 0-31: r0-31, the 8-byte general-purpose registers
2993 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
2994
2995 // 32-63: fp0-31, the 8-byte floating-point registers
2996 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
2997
2998 // 64-76 are various 4-byte special-purpose registers:
2999 // 64: mq
3000 // 65: lr
3001 // 66: ctr
3002 // 67: ap
3003 // 68-75 cr0-7
3004 // 76: xer
3005 AssignToArrayRange(Builder, Address, Four8, 64, 76);
3006
3007 // 77-108: v0-31, the 16-byte vector registers
3008 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3009
3010 // 109: vrsave
3011 // 110: vscr
3012 // 111: spe_acc
3013 // 112: spefscr
3014 // 113: sfp
3015 AssignToArrayRange(Builder, Address, Four8, 109, 113);
3016
3017 return false;
3018}
John McCallec853ba2010-03-11 00:10:12 +00003019
Bill Schmidt2fc107f2012-10-03 19:18:57 +00003020bool
3021PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3022 CodeGen::CodeGenFunction &CGF,
3023 llvm::Value *Address) const {
3024
3025 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3026}
3027
3028bool
3029PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3030 llvm::Value *Address) const {
3031
3032 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3033}
3034
Chris Lattnerdce5ad02010-06-28 20:05:43 +00003035//===----------------------------------------------------------------------===//
Daniel Dunbar34d91fd2009-09-12 00:59:49 +00003036// ARM ABI Implementation
Chris Lattnerdce5ad02010-06-28 20:05:43 +00003037//===----------------------------------------------------------------------===//
Daniel Dunbar34d91fd2009-09-12 00:59:49 +00003038
3039namespace {
3040
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003041class ARMABIInfo : public ABIInfo {
Daniel Dunbar5e7bace2009-09-12 01:00:39 +00003042public:
3043 enum ABIKind {
3044 APCS = 0,
3045 AAPCS = 1,
3046 AAPCS_VFP
3047 };
3048
3049private:
3050 ABIKind Kind;
3051
3052public:
John McCallbd7370a2013-02-28 19:01:20 +00003053 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
3054 setRuntimeCC();
3055 }
Daniel Dunbar5e7bace2009-09-12 01:00:39 +00003056
John McCall49e34be2011-08-30 01:42:09 +00003057 bool isEABI() const {
John McCall64aa4b32013-04-16 22:48:15 +00003058 StringRef Env = getTarget().getTriple().getEnvironmentName();
Logan Chien94a71422012-09-02 09:30:11 +00003059 return (Env == "gnueabi" || Env == "eabi" ||
3060 Env == "android" || Env == "androideabi");
John McCall49e34be2011-08-30 01:42:09 +00003061 }
3062
Daniel Dunbar5e7bace2009-09-12 01:00:39 +00003063private:
3064 ABIKind getABIKind() const { return Kind; }
3065
Chris Lattnera3c109b2010-07-29 02:16:43 +00003066 ABIArgInfo classifyReturnType(QualType RetTy) const;
Manman Ren710c5172012-10-31 19:02:26 +00003067 ABIArgInfo classifyArgumentType(QualType RetTy, int *VFPRegs,
3068 unsigned &AllocatedVFP,
Manman Renb3fa55f2012-10-30 23:21:41 +00003069 bool &IsHA) const;
Manman Ren97f81572012-10-16 19:18:39 +00003070 bool isIllegalVectorType(QualType Ty) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003071
Chris Lattneree5dcd02010-07-29 02:31:05 +00003072 virtual void computeInfo(CGFunctionInfo &FI) const;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003073
3074 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3075 CodeGenFunction &CGF) const;
John McCallbd7370a2013-02-28 19:01:20 +00003076
3077 llvm::CallingConv::ID getLLVMDefaultCC() const;
3078 llvm::CallingConv::ID getABIDefaultCC() const;
3079 void setRuntimeCC();
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003080};
3081
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00003082class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
3083public:
Chris Lattnerea044322010-07-29 02:01:43 +00003084 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
3085 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
John McCall6374c332010-03-06 00:35:14 +00003086
John McCall49e34be2011-08-30 01:42:09 +00003087 const ARMABIInfo &getABIInfo() const {
3088 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
3089 }
3090
John McCall6374c332010-03-06 00:35:14 +00003091 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3092 return 13;
3093 }
Roman Divacky09345d12011-05-18 19:36:54 +00003094
Chris Lattner5f9e2722011-07-23 10:55:15 +00003095 StringRef getARCRetainAutoreleasedReturnValueMarker() const {
John McCallf85e1932011-06-15 23:02:42 +00003096 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
3097 }
3098
Roman Divacky09345d12011-05-18 19:36:54 +00003099 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3100 llvm::Value *Address) const {
Chris Lattner8b418682012-02-07 00:39:47 +00003101 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
Roman Divacky09345d12011-05-18 19:36:54 +00003102
3103 // 0-15 are the 16 integer registers.
Chris Lattner8b418682012-02-07 00:39:47 +00003104 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
Roman Divacky09345d12011-05-18 19:36:54 +00003105 return false;
3106 }
John McCall49e34be2011-08-30 01:42:09 +00003107
3108 unsigned getSizeOfUnwindException() const {
3109 if (getABIInfo().isEABI()) return 88;
3110 return TargetCodeGenInfo::getSizeOfUnwindException();
3111 }
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00003112};
3113
Daniel Dunbar34d91fd2009-09-12 00:59:49 +00003114}
3115
Chris Lattneree5dcd02010-07-29 02:31:05 +00003116void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
Manman Renb3fa55f2012-10-30 23:21:41 +00003117 // To correctly handle Homogeneous Aggregates, we need to keep track of the
Manman Ren710c5172012-10-31 19:02:26 +00003118 // VFP registers allocated so far.
Manman Renb3fa55f2012-10-30 23:21:41 +00003119 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
3120 // VFP registers of the appropriate type unallocated then the argument is
3121 // allocated to the lowest-numbered sequence of such registers.
3122 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
3123 // unallocated are marked as unavailable.
3124 unsigned AllocatedVFP = 0;
Manman Ren710c5172012-10-31 19:02:26 +00003125 int VFPRegs[16] = { 0 };
Chris Lattnera3c109b2010-07-29 02:16:43 +00003126 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003127 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
Manman Renb3fa55f2012-10-30 23:21:41 +00003128 it != ie; ++it) {
3129 unsigned PreAllocation = AllocatedVFP;
3130 bool IsHA = false;
3131 // 6.1.2.3 There is one VFP co-processor register class using registers
3132 // s0-s15 (d0-d7) for passing arguments.
3133 const unsigned NumVFPs = 16;
Manman Ren710c5172012-10-31 19:02:26 +00003134 it->info = classifyArgumentType(it->type, VFPRegs, AllocatedVFP, IsHA);
Manman Renb3fa55f2012-10-30 23:21:41 +00003135 // If we do not have enough VFP registers for the HA, any VFP registers
3136 // that are unallocated are marked as unavailable. To achieve this, we add
3137 // padding of (NumVFPs - PreAllocation) floats.
3138 if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
3139 llvm::Type *PaddingTy = llvm::ArrayType::get(
3140 llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
3141 it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
3142 }
3143 }
Daniel Dunbar5e7bace2009-09-12 01:00:39 +00003144
Anton Korobeynikov414d8962011-04-14 20:06:49 +00003145 // Always honor user-specified calling convention.
3146 if (FI.getCallingConvention() != llvm::CallingConv::C)
3147 return;
3148
John McCallbd7370a2013-02-28 19:01:20 +00003149 llvm::CallingConv::ID cc = getRuntimeCC();
3150 if (cc != llvm::CallingConv::C)
3151 FI.setEffectiveCallingConvention(cc);
3152}
Rafael Espindola25117ab2010-06-16 16:13:39 +00003153
John McCallbd7370a2013-02-28 19:01:20 +00003154/// Return the default calling convention that LLVM will use.
3155llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
3156 // The default calling convention that LLVM will infer.
John McCall64aa4b32013-04-16 22:48:15 +00003157 if (getTarget().getTriple().getEnvironmentName()=="gnueabihf")
John McCallbd7370a2013-02-28 19:01:20 +00003158 return llvm::CallingConv::ARM_AAPCS_VFP;
3159 else if (isEABI())
3160 return llvm::CallingConv::ARM_AAPCS;
3161 else
3162 return llvm::CallingConv::ARM_APCS;
3163}
3164
3165/// Return the calling convention that our ABI would like us to use
3166/// as the C calling convention.
3167llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
Daniel Dunbar5e7bace2009-09-12 01:00:39 +00003168 switch (getABIKind()) {
John McCallbd7370a2013-02-28 19:01:20 +00003169 case APCS: return llvm::CallingConv::ARM_APCS;
3170 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
3171 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
Daniel Dunbar5e7bace2009-09-12 01:00:39 +00003172 }
John McCallbd7370a2013-02-28 19:01:20 +00003173 llvm_unreachable("bad ABI kind");
3174}
3175
3176void ARMABIInfo::setRuntimeCC() {
3177 assert(getRuntimeCC() == llvm::CallingConv::C);
3178
3179 // Don't muddy up the IR with a ton of explicit annotations if
3180 // they'd just match what LLVM will infer from the triple.
3181 llvm::CallingConv::ID abiCC = getABIDefaultCC();
3182 if (abiCC != getLLVMDefaultCC())
3183 RuntimeCC = abiCC;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003184}
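// For example (a sketch, assuming the frontend picked the AAPCS_VFP kind for
// an arm-linux-gnueabihf triple): getABIDefaultCC() and getLLVMDefaultCC()
// both resolve to ARM_AAPCS_VFP, so RuntimeCC stays llvm::CallingConv::C and
// no explicit calling-convention annotation is emitted.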
3185
Bob Wilson194f06a2011-08-03 05:58:22 +00003186/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
3187/// aggregate. If HAMembers is non-null, the number of base elements
3188/// contained in the type is returned through it; this is used for the
3189/// recursive calls that check aggregate component types.
3190static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
3191 ASTContext &Context,
3192 uint64_t *HAMembers = 0) {
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003193 uint64_t Members = 0;
Bob Wilson194f06a2011-08-03 05:58:22 +00003194 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3195 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
3196 return false;
3197 Members *= AT->getSize().getZExtValue();
3198 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3199 const RecordDecl *RD = RT->getDecl();
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003200 if (RD->hasFlexibleArrayMember())
Bob Wilson194f06a2011-08-03 05:58:22 +00003201 return false;
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003202
Bob Wilson194f06a2011-08-03 05:58:22 +00003203 Members = 0;
3204 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3205 i != e; ++i) {
David Blaikie581deb32012-06-06 20:45:41 +00003206 const FieldDecl *FD = *i;
Bob Wilson194f06a2011-08-03 05:58:22 +00003207 uint64_t FldMembers;
3208 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
3209 return false;
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003210
3211 Members = (RD->isUnion() ?
3212 std::max(Members, FldMembers) : Members + FldMembers);
Bob Wilson194f06a2011-08-03 05:58:22 +00003213 }
3214 } else {
3215 Members = 1;
3216 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
3217 Members = 2;
3218 Ty = CT->getElementType();
3219 }
3220
3221 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
3222 // double, or 64-bit or 128-bit vectors.
3223 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3224 if (BT->getKind() != BuiltinType::Float &&
Tim Northoveradfa45f2012-07-20 22:29:29 +00003225 BT->getKind() != BuiltinType::Double &&
3226 BT->getKind() != BuiltinType::LongDouble)
Bob Wilson194f06a2011-08-03 05:58:22 +00003227 return false;
3228 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
3229 unsigned VecSize = Context.getTypeSize(VT);
3230 if (VecSize != 64 && VecSize != 128)
3231 return false;
3232 } else {
3233 return false;
3234 }
3235
3236 // The base type must be the same for all members. Vector types of the
3237 // same total size are treated as being equivalent here.
3238 const Type *TyPtr = Ty.getTypePtr();
3239 if (!Base)
3240 Base = TyPtr;
3241 if (Base != TyPtr &&
3242 (!Base->isVectorType() || !TyPtr->isVectorType() ||
3243 Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
3244 return false;
3245 }
3246
3247 // Homogeneous Aggregates can have at most 4 members of the base type.
3248 if (HAMembers)
3249 *HAMembers = Members;
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003250
3251 return (Members > 0 && Members <= 4);
Bob Wilson194f06a2011-08-03 05:58:22 +00003252}
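
// Editor's sketch (not part of the original file): a few C types and how the
// routine above classifies them under AAPCS-VFP, assuming the NEON vector
// types from <arm_neon.h>:
//
//   struct HFA3  { double d[3]; };        // Base = double, Members = 3 -> HA
//   struct HVA2  { float32x4_t a, b; };   // Base = 128-bit vector, 2   -> HA
//   struct Mixed { float f; double d; };  // base types differ          -> not
//   struct Big   { float f[5]; };         // Members = 5 > 4            -> not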
3253
Manman Ren710c5172012-10-31 19:02:26 +00003254/// markAllocatedVFPs - update VFPRegs according to the alignment and
3255/// number of VFP registers (unit is S register) requested.
3256static void markAllocatedVFPs(int *VFPRegs, unsigned &AllocatedVFP,
3257 unsigned Alignment,
3258 unsigned NumRequired) {
3259 // Early Exit.
3260 if (AllocatedVFP >= 16)
3261 return;
3262 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
3263 // VFP registers of the appropriate type unallocated then the argument is
3264 // allocated to the lowest-numbered sequence of such registers.
3265 for (unsigned I = 0; I < 16; I += Alignment) {
3266 bool FoundSlot = true;
3267 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
3268 if (J >= 16 || VFPRegs[J]) {
3269 FoundSlot = false;
3270 break;
3271 }
3272 if (FoundSlot) {
3273 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
3274 VFPRegs[J] = 1;
3275 AllocatedVFP += NumRequired;
3276 return;
3277 }
3278 }
3279 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
3280 // unallocated are marked as unavailable.
3281 for (unsigned I = 0; I < 16; I++)
3282 VFPRegs[I] = 1;
3283 AllocatedVFP = 17; // We do not have enough VFP registers.
3284}
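
// Editor's walk-through (not part of the original file): starting from an
// empty VFPRegs array, a float (Alignment=1, NumRequired=1) takes s0; a
// following double (Alignment=2, NumRequired=2) cannot start at the occupied
// s0, so it takes s2-s3; a later float then back-fills s1, which is the
// AAPCS C.1.vfp back-filling behaviour. When no suitable slot remains,
// C.2.vfp above marks all 16 S registers used and sets AllocatedVFP past 16
// so subsequent VFP candidates go on the stack.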
3285
3286ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, int *VFPRegs,
3287 unsigned &AllocatedVFP,
Manman Renb3fa55f2012-10-30 23:21:41 +00003288 bool &IsHA) const {
3289 // We update number of allocated VFPs according to
3290 // 6.1.2.1 The following argument types are VFP CPRCs:
3291 // A single-precision floating-point type (including promoted
3292 // half-precision types); A double-precision floating-point type;
3293 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
3294 // with a Base Type of a single- or double-precision floating-point type,
3295 // 64-bit containerized vectors or 128-bit containerized vectors with one
3296 // to four Elements.
3297
Manman Ren97f81572012-10-16 19:18:39 +00003298 // Handle illegal vector types here.
3299 if (isIllegalVectorType(Ty)) {
3300 uint64_t Size = getContext().getTypeSize(Ty);
3301 if (Size <= 32) {
3302 llvm::Type *ResType =
3303 llvm::Type::getInt32Ty(getVMContext());
3304 return ABIArgInfo::getDirect(ResType);
3305 }
3306 if (Size == 64) {
3307 llvm::Type *ResType = llvm::VectorType::get(
3308 llvm::Type::getInt32Ty(getVMContext()), 2);
Manman Ren710c5172012-10-31 19:02:26 +00003309 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
Manman Ren97f81572012-10-16 19:18:39 +00003310 return ABIArgInfo::getDirect(ResType);
3311 }
3312 if (Size == 128) {
3313 llvm::Type *ResType = llvm::VectorType::get(
3314 llvm::Type::getInt32Ty(getVMContext()), 4);
Manman Ren710c5172012-10-31 19:02:26 +00003315 markAllocatedVFPs(VFPRegs, AllocatedVFP, 4, 4);
Manman Ren97f81572012-10-16 19:18:39 +00003316 return ABIArgInfo::getDirect(ResType);
3317 }
3318 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3319 }
Manman Ren710c5172012-10-31 19:02:26 +00003320 // Update VFPRegs for legal vector types.
Manman Renb3fa55f2012-10-30 23:21:41 +00003321 if (const VectorType *VT = Ty->getAs<VectorType>()) {
3322 uint64_t Size = getContext().getTypeSize(VT);
3323 // The size of a legal vector is a power of 2 and at least 64 bits.
Manman Ren710c5172012-10-31 19:02:26 +00003324 markAllocatedVFPs(VFPRegs, AllocatedVFP, Size >= 128 ? 4 : 2, Size / 32);
Manman Renb3fa55f2012-10-30 23:21:41 +00003325 }
Manman Ren710c5172012-10-31 19:02:26 +00003326 // Update VFPRegs for floating point types.
Manman Renb3fa55f2012-10-30 23:21:41 +00003327 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3328 if (BT->getKind() == BuiltinType::Half ||
3329 BT->getKind() == BuiltinType::Float)
Manman Ren710c5172012-10-31 19:02:26 +00003330 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, 1);
Manman Renb3fa55f2012-10-30 23:21:41 +00003331 if (BT->getKind() == BuiltinType::Double ||
Manman Ren710c5172012-10-31 19:02:26 +00003332 BT->getKind() == BuiltinType::LongDouble)
3333 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, 2);
Manman Renb3fa55f2012-10-30 23:21:41 +00003334 }
Manman Ren97f81572012-10-16 19:18:39 +00003335
John McCalld608cdb2010-08-22 10:59:02 +00003336 if (!isAggregateTypeForABI(Ty)) {
Douglas Gregoraa74a1e2010-02-02 20:10:50 +00003337 // Treat an enum type as its underlying type.
3338 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3339 Ty = EnumTy->getDecl()->getIntegerType();
3340
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00003341 return (Ty->isPromotableIntegerType() ?
3342 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Douglas Gregoraa74a1e2010-02-02 20:10:50 +00003343 }
Daniel Dunbar98303b92009-09-13 08:03:58 +00003344
Tim Northoverf5c3a252013-06-21 22:49:34 +00003345 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
3346 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3347
Daniel Dunbar42025572009-09-14 21:54:03 +00003348 // Ignore empty records.
Chris Lattnera3c109b2010-07-29 02:16:43 +00003349 if (isEmptyRecord(getContext(), Ty, true))
Daniel Dunbar42025572009-09-14 21:54:03 +00003350 return ABIArgInfo::getIgnore();
3351
Bob Wilson194f06a2011-08-03 05:58:22 +00003352 if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
Manman Renb3fa55f2012-10-30 23:21:41 +00003353 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
3354 // into VFP registers.
Bob Wilson194f06a2011-08-03 05:58:22 +00003355 const Type *Base = 0;
Manman Renb3fa55f2012-10-30 23:21:41 +00003356 uint64_t Members = 0;
3357 if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003358 assert(Base && "Base class should be set for homogeneous aggregate");
Manman Renb3fa55f2012-10-30 23:21:41 +00003359 // Base can be a floating-point or a vector.
3360 if (Base->isVectorType()) {
3361 // ElementSize is in number of floats.
3362 unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
Manman Rencb489dd2012-11-06 19:05:29 +00003363 markAllocatedVFPs(VFPRegs, AllocatedVFP, ElementSize,
3364 Members * ElementSize);
Manman Renb3fa55f2012-10-30 23:21:41 +00003365 } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
Manman Ren710c5172012-10-31 19:02:26 +00003366 markAllocatedVFPs(VFPRegs, AllocatedVFP, 1, Members);
Manman Renb3fa55f2012-10-30 23:21:41 +00003367 else {
3368 assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
3369 Base->isSpecificBuiltinType(BuiltinType::LongDouble));
Manman Ren710c5172012-10-31 19:02:26 +00003370 markAllocatedVFPs(VFPRegs, AllocatedVFP, 2, Members * 2);
Manman Renb3fa55f2012-10-30 23:21:41 +00003371 }
3372 IsHA = true;
Bob Wilson194f06a2011-08-03 05:58:22 +00003373 return ABIArgInfo::getExpand();
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003374 }
Bob Wilson194f06a2011-08-03 05:58:22 +00003375 }
3376
Manman Ren634b3d22012-08-13 21:23:55 +00003377 // Support byval for ARM.
Manman Rencb489dd2012-11-06 19:05:29 +00003378 // The ABI alignment for APCS is 4 bytes; for AAPCS it is at least 4 and at
3379 // most 8 bytes. We realign the indirect argument if the type alignment is
3380 // bigger than the ABI alignment.
Manman Renfd1ba912012-11-05 22:42:46 +00003381 uint64_t ABIAlign = 4;
3382 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
3383 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
3384 getABIKind() == ARMABIInfo::AAPCS)
3385 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
Manman Ren885ad692012-11-06 04:58:01 +00003386 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
3387 return ABIArgInfo::getIndirect(0, /*ByVal=*/true,
Manman Rencb489dd2012-11-06 19:05:29 +00003388 /*Realign=*/TyAlign > ABIAlign);
Eli Friedman79f30982012-08-09 00:31:40 +00003389 }
3390
Daniel Dunbar8aa87c72010-09-23 01:54:28 +00003391 // Otherwise, pass by coercing to a structure of the appropriate size.
Chris Lattner2acc6e32011-07-18 04:24:23 +00003392 llvm::Type* ElemTy;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003393 unsigned SizeRegs;
Eli Friedman79f30982012-08-09 00:31:40 +00003394 // FIXME: Try to match the types of the arguments more accurately where
3395 // we can.
3396 if (getContext().getTypeAlign(Ty) <= 32) {
Bob Wilson53fc1a62011-08-01 23:39:04 +00003397 ElemTy = llvm::Type::getInt32Ty(getVMContext());
3398 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
Manman Ren78eb76e2012-06-25 22:04:00 +00003399 } else {
Manman Ren78eb76e2012-06-25 22:04:00 +00003400 ElemTy = llvm::Type::getInt64Ty(getVMContext());
3401 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
Stuart Hastings67d097e2011-04-27 17:24:02 +00003402 }
Stuart Hastingsb7f62d02011-04-28 18:16:06 +00003403
Chris Lattner9cbe4f02011-07-09 17:41:47 +00003404 llvm::Type *STy =
Chris Lattner7650d952011-06-18 22:49:11 +00003405 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
Stuart Hastingsb7f62d02011-04-28 18:16:06 +00003406 return ABIArgInfo::getDirect(STy);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003407}
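
// Editor's example (not part of the original file; plain AAPCS assumed): an
// aggregate like "struct { char c; short s; int i; }" (8 bytes, 4-byte
// aligned) is neither empty nor a homogeneous aggregate and is well under the
// 64-byte byval threshold, so it is coerced to { [2 x i32] }. A 16-byte
// struct containing a double (8-byte alignment) takes the i64 path and is
// coerced to { [2 x i64] } instead.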
3408
Chris Lattnera3c109b2010-07-29 02:16:43 +00003409static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
Daniel Dunbar98303b92009-09-13 08:03:58 +00003410 llvm::LLVMContext &VMContext) {
3411 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
3412 // is called integer-like if its size is less than or equal to one word, and
3413 // the offset of each of its addressable sub-fields is zero.
3414
3415 uint64_t Size = Context.getTypeSize(Ty);
3416
3417 // Check that the type fits in a word.
3418 if (Size > 32)
3419 return false;
3420
3421 // FIXME: Handle vector types!
3422 if (Ty->isVectorType())
3423 return false;
3424
Daniel Dunbarb0d58192009-09-14 02:20:34 +00003425 // Float types are never treated as "integer like".
3426 if (Ty->isRealFloatingType())
3427 return false;
3428
Daniel Dunbar98303b92009-09-13 08:03:58 +00003429 // If this is a builtin or pointer type then it is ok.
John McCall183700f2009-09-21 23:43:11 +00003430 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
Daniel Dunbar98303b92009-09-13 08:03:58 +00003431 return true;
3432
Daniel Dunbar45815812010-02-01 23:31:26 +00003433 // Small complex integer types are "integer like".
3434 if (const ComplexType *CT = Ty->getAs<ComplexType>())
3435 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
Daniel Dunbar98303b92009-09-13 08:03:58 +00003436
3437 // Single element and zero sized arrays should be allowed, by the definition
3438 // above, but they are not.
3439
3440 // Otherwise, it must be a record type.
3441 const RecordType *RT = Ty->getAs<RecordType>();
3442 if (!RT) return false;
3443
3444 // Ignore records with flexible arrays.
3445 const RecordDecl *RD = RT->getDecl();
3446 if (RD->hasFlexibleArrayMember())
3447 return false;
3448
3449 // Check that all sub-fields are at offset 0, and are themselves "integer
3450 // like".
3451 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3452
3453 bool HadField = false;
3454 unsigned idx = 0;
3455 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3456 i != e; ++i, ++idx) {
David Blaikie581deb32012-06-06 20:45:41 +00003457 const FieldDecl *FD = *i;
Daniel Dunbar98303b92009-09-13 08:03:58 +00003458
Daniel Dunbar679855a2010-01-29 03:22:29 +00003459 // Bit-fields are not addressable, we only need to verify they are "integer
3460 // like". We still have to disallow a subsequent non-bitfield, for example:
3461 // struct { int : 0; int x }
3462 // is non-integer like according to gcc.
3463 if (FD->isBitField()) {
3464 if (!RD->isUnion())
3465 HadField = true;
Daniel Dunbar98303b92009-09-13 08:03:58 +00003466
Daniel Dunbar679855a2010-01-29 03:22:29 +00003467 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
3468 return false;
Daniel Dunbar98303b92009-09-13 08:03:58 +00003469
Daniel Dunbar679855a2010-01-29 03:22:29 +00003470 continue;
Daniel Dunbar98303b92009-09-13 08:03:58 +00003471 }
3472
Daniel Dunbar679855a2010-01-29 03:22:29 +00003473 // Check if this field is at offset 0.
3474 if (Layout.getFieldOffset(idx) != 0)
3475 return false;
3476
Daniel Dunbar98303b92009-09-13 08:03:58 +00003477 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
3478 return false;
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00003479
Daniel Dunbar679855a2010-01-29 03:22:29 +00003480 // Only allow at most one field in a structure. This doesn't match the
3481 // wording above, but follows gcc in situations with a field following an
3482 // empty structure.
Daniel Dunbar98303b92009-09-13 08:03:58 +00003483 if (!RD->isUnion()) {
3484 if (HadField)
3485 return false;
3486
3487 HadField = true;
3488 }
3489 }
3490
3491 return true;
3492}
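
// Editor's example (not part of the original file): "struct { char c; }" is
// integer-like (a single addressable field at offset 0 that fits in a word),
// so APCS returns it in r0. "struct { short a, b; }" also fits in a word,
// but its second field sits at a non-zero offset, so it is not integer-like
// and APCS returns it in memory.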
3493
Chris Lattnera3c109b2010-07-29 02:16:43 +00003494ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
Daniel Dunbar98303b92009-09-13 08:03:58 +00003495 if (RetTy->isVoidType())
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003496 return ABIArgInfo::getIgnore();
Daniel Dunbar98303b92009-09-13 08:03:58 +00003497
Daniel Dunbarf554b1c2010-09-23 01:54:32 +00003498 // Large vector types should be returned via memory.
3499 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
3500 return ABIArgInfo::getIndirect(0);
3501
John McCalld608cdb2010-08-22 10:59:02 +00003502 if (!isAggregateTypeForABI(RetTy)) {
Douglas Gregoraa74a1e2010-02-02 20:10:50 +00003503 // Treat an enum type as its underlying type.
3504 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3505 RetTy = EnumTy->getDecl()->getIntegerType();
3506
Anton Korobeynikovcc6fa882009-06-06 09:36:29 +00003507 return (RetTy->isPromotableIntegerType() ?
3508 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Douglas Gregoraa74a1e2010-02-02 20:10:50 +00003509 }
Daniel Dunbar98303b92009-09-13 08:03:58 +00003510
Rafael Espindola0eb1d972010-06-08 02:42:08 +00003511 // Structures with either a non-trivial destructor or a non-trivial
3512 // copy constructor are always indirect.
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00003513 if (isRecordReturnIndirect(RetTy, CGT))
Rafael Espindola0eb1d972010-06-08 02:42:08 +00003514 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3515
Daniel Dunbar98303b92009-09-13 08:03:58 +00003516 // Are we following APCS?
3517 if (getABIKind() == APCS) {
Chris Lattnera3c109b2010-07-29 02:16:43 +00003518 if (isEmptyRecord(getContext(), RetTy, false))
Daniel Dunbar98303b92009-09-13 08:03:58 +00003519 return ABIArgInfo::getIgnore();
3520
Daniel Dunbar4cc753f2010-02-01 23:31:19 +00003521 // Complex types are all returned as packed integers.
3522 //
3523 // FIXME: Consider using 2 x vector types if the back end handles them
3524 // correctly.
3525 if (RetTy->isAnyComplexType())
Chris Lattner800588f2010-07-29 06:26:06 +00003526 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Chris Lattnera3c109b2010-07-29 02:16:43 +00003527 getContext().getTypeSize(RetTy)));
Daniel Dunbar4cc753f2010-02-01 23:31:19 +00003528
Daniel Dunbar98303b92009-09-13 08:03:58 +00003529 // Integer like structures are returned in r0.
Chris Lattnera3c109b2010-07-29 02:16:43 +00003530 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
Daniel Dunbar98303b92009-09-13 08:03:58 +00003531 // Return in the smallest viable integer type.
Chris Lattnera3c109b2010-07-29 02:16:43 +00003532 uint64_t Size = getContext().getTypeSize(RetTy);
Daniel Dunbar98303b92009-09-13 08:03:58 +00003533 if (Size <= 8)
Chris Lattner800588f2010-07-29 06:26:06 +00003534 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
Daniel Dunbar98303b92009-09-13 08:03:58 +00003535 if (Size <= 16)
Chris Lattner800588f2010-07-29 06:26:06 +00003536 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3537 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
Daniel Dunbar98303b92009-09-13 08:03:58 +00003538 }
3539
3540 // Otherwise return in memory.
3541 return ABIArgInfo::getIndirect(0);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003542 }
Daniel Dunbar98303b92009-09-13 08:03:58 +00003543
3544 // Otherwise this is an AAPCS variant.
3545
Chris Lattnera3c109b2010-07-29 02:16:43 +00003546 if (isEmptyRecord(getContext(), RetTy, true))
Daniel Dunbar16a08082009-09-14 00:56:55 +00003547 return ABIArgInfo::getIgnore();
3548
Bob Wilson3b694fa2011-11-02 04:51:36 +00003549 // Check for homogeneous aggregates with AAPCS-VFP.
3550 if (getABIKind() == AAPCS_VFP) {
3551 const Type *Base = 0;
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003552 if (isHomogeneousAggregate(RetTy, Base, getContext())) {
3553 assert(Base && "Base class should be set for homogeneous aggregate");
Bob Wilson3b694fa2011-11-02 04:51:36 +00003554 // Homogeneous Aggregates are returned directly.
3555 return ABIArgInfo::getDirect();
Anton Korobeynikoveaf856d2012-04-13 11:22:00 +00003556 }
Bob Wilson3b694fa2011-11-02 04:51:36 +00003557 }
3558
Daniel Dunbar98303b92009-09-13 08:03:58 +00003559 // Aggregates <= 4 bytes are returned in r0; other aggregates
3560 // are returned indirectly.
Chris Lattnera3c109b2010-07-29 02:16:43 +00003561 uint64_t Size = getContext().getTypeSize(RetTy);
Daniel Dunbar16a08082009-09-14 00:56:55 +00003562 if (Size <= 32) {
3563 // Return in the smallest viable integer type.
3564 if (Size <= 8)
Chris Lattner800588f2010-07-29 06:26:06 +00003565 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
Daniel Dunbar16a08082009-09-14 00:56:55 +00003566 if (Size <= 16)
Chris Lattner800588f2010-07-29 06:26:06 +00003567 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
3568 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
Daniel Dunbar16a08082009-09-14 00:56:55 +00003569 }
3570
Daniel Dunbar98303b92009-09-13 08:03:58 +00003571 return ABIArgInfo::getIndirect(0);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003572}
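
// Editor's note (not part of the original file): under the AAPCS variant
// above, an aggregate of at most 4 bytes comes back in the smallest integer
// type that holds it (i8, i16 or i32) and anything larger is returned
// indirectly, except that AAPCS-VFP returns homogeneous aggregates such as
// "struct { float x, y; }" directly in VFP registers.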
3573
Manman Ren97f81572012-10-16 19:18:39 +00003574/// isIllegalVectorType - check whether Ty is an illegal vector type.
3575bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
3576 if (const VectorType *VT = Ty->getAs<VectorType>()) {
3577 // Check whether VT is legal.
3578 unsigned NumElements = VT->getNumElements();
3579 uint64_t Size = getContext().getTypeSize(VT);
3580 // NumElements should be power of 2.
3581 if ((NumElements & (NumElements - 1)) != 0)
3582 return true;
3583 // A legal vector must also be larger than 32 bits.
3584 return Size <= 32;
3585 }
3586 return false;
3587}
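
// Editor's example (not part of the original file): a GCC-style
// "int __attribute__((vector_size(8)))" (2 x i32, 64 bits) is legal, whereas
// a 3-element vector (element count not a power of 2) or a 4 x i8 vector
// (only 32 bits) is illegal and gets coerced or passed indirectly by
// classifyArgumentType above.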
3588
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003589llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
Chris Lattner77b89b82010-06-27 07:15:29 +00003590 CodeGenFunction &CGF) const {
Chris Lattner8b418682012-02-07 00:39:47 +00003591 llvm::Type *BP = CGF.Int8PtrTy;
3592 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003593
3594 CGBuilderTy &Builder = CGF.Builder;
Chris Lattner8b418682012-02-07 00:39:47 +00003595 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003596 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
Manman Rend105e732012-10-16 19:01:37 +00003597
Tim Northover373ac0a2013-06-21 23:05:33 +00003598 if (isEmptyRecord(getContext(), Ty, true)) {
3599 // These are ignored for parameter passing purposes.
3600 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3601 return Builder.CreateBitCast(Addr, PTy);
3602 }
3603
Manman Rend105e732012-10-16 19:01:37 +00003604 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
Rafael Espindolae164c182011-08-02 22:33:37 +00003605 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
Manman Ren97f81572012-10-16 19:18:39 +00003606 bool IsIndirect = false;
Manman Rend105e732012-10-16 19:01:37 +00003607
3608 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
3609 // APCS. For AAPCS, the ABI alignment is at least 4 and at most 8 bytes.
Manman Ren93371022012-10-16 19:51:48 +00003610 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
3611 getABIKind() == ARMABIInfo::AAPCS)
3612 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
3613 else
3614 TyAlign = 4;
Manman Ren97f81572012-10-16 19:18:39 +00003615 // Use indirect if size of the illegal vector is bigger than 16 bytes.
3616 if (isIllegalVectorType(Ty) && Size > 16) {
3617 IsIndirect = true;
3618 Size = 4;
3619 TyAlign = 4;
3620 }
Manman Rend105e732012-10-16 19:01:37 +00003621
3622 // Handle address alignment for ABI alignment > 4 bytes.
Rafael Espindolae164c182011-08-02 22:33:37 +00003623 if (TyAlign > 4) {
3624 assert((TyAlign & (TyAlign - 1)) == 0 &&
3625 "Alignment is not power of 2!");
3626 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
3627 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
3628 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
Manman Rend105e732012-10-16 19:01:37 +00003629 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
Rafael Espindolae164c182011-08-02 22:33:37 +00003630 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003631
3632 uint64_t Offset =
Manman Rend105e732012-10-16 19:01:37 +00003633 llvm::RoundUpToAlignment(Size, 4);
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003634 llvm::Value *NextAddr =
Chris Lattner77b89b82010-06-27 07:15:29 +00003635 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003636 "ap.next");
3637 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3638
Manman Ren97f81572012-10-16 19:18:39 +00003639 if (IsIndirect)
3640 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
Manman Ren93371022012-10-16 19:51:48 +00003641 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
Manman Rend105e732012-10-16 19:01:37 +00003642 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
3643 // may not be correctly aligned for the vector type. We create an aligned
3644 // temporary space and copy the content over from ap.cur to the temporary
3645 // space. This is necessary if the natural alignment of the type is greater
3646 // than the ABI alignment.
3647 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
3648 CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
3649 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
3650 "var.align");
3651 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
3652 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
3653 Builder.CreateMemCpy(Dst, Src,
3654 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
3655 TyAlign, false);
3656 Addr = AlignedTemp; //The content is in aligned location.
3657 }
3658 llvm::Type *PTy =
3659 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3660 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3661
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00003662 return AddrTyped;
3663}
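
// Editor's note (not part of the original file): for a type with 8-byte ABI
// alignment under AAPCS (e.g. a struct containing a double), the code above
// first rounds ap.cur up to an 8-byte boundary, then advances it by the type
// size rounded up to a multiple of 4; illegal vector types larger than 16
// bytes are instead read through one extra level of pointer (IsIndirect).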
3664
Benjamin Kramerc6f84cf2012-10-20 13:02:06 +00003665namespace {
3666
Derek Schuff263366f2012-10-16 22:30:41 +00003667class NaClARMABIInfo : public ABIInfo {
3668 public:
3669 NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3670 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
3671 virtual void computeInfo(CGFunctionInfo &FI) const;
3672 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3673 CodeGenFunction &CGF) const;
3674 private:
3675 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
3676 ARMABIInfo NInfo; // Used for everything else.
3677};
3678
3679class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
3680 public:
3681 NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
3682 : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
3683};
3684
Benjamin Kramerc6f84cf2012-10-20 13:02:06 +00003685}
3686
Derek Schuff263366f2012-10-16 22:30:41 +00003687void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
3688 if (FI.getASTCallingConvention() == CC_PnaclCall)
3689 PInfo.computeInfo(FI);
3690 else
3691 static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
3692}
3693
3694llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3695 CodeGenFunction &CGF) const {
3696 // Always use the native convention; calling pnacl-style varargs functions
3697 // is unsupported.
3698 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
3699}
3700
Chris Lattnerdce5ad02010-06-28 20:05:43 +00003701//===----------------------------------------------------------------------===//
Tim Northoverc264e162013-01-31 12:13:10 +00003702// AArch64 ABI Implementation
3703//===----------------------------------------------------------------------===//
3704
3705namespace {
3706
3707class AArch64ABIInfo : public ABIInfo {
3708public:
3709 AArch64ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
3710
3711private:
3712 // The AArch64 PCS is explicit about return types and argument types being
3713 // handled identically, so we don't need to draw a distinction between
3714 // Argument and Return classification.
3715 ABIArgInfo classifyGenericType(QualType Ty, int &FreeIntRegs,
3716 int &FreeVFPRegs) const;
3717
3718 ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt,
3719 llvm::Type *DirectTy = 0) const;
3720
3721 virtual void computeInfo(CGFunctionInfo &FI) const;
3722
3723 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3724 CodeGenFunction &CGF) const;
3725};
3726
3727class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
3728public:
3729 AArch64TargetCodeGenInfo(CodeGenTypes &CGT)
3730 :TargetCodeGenInfo(new AArch64ABIInfo(CGT)) {}
3731
3732 const AArch64ABIInfo &getABIInfo() const {
3733 return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
3734 }
3735
3736 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
3737 return 31;
3738 }
3739
3740 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3741 llvm::Value *Address) const {
3742 // 0-31 are x0-x30 and sp: 8 bytes each
3743 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
3744 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31);
3745
3746 // 64-95 are v0-v31: 16 bytes each
3747 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
3748 AssignToArrayRange(CGF.Builder, Address, Sixteen8, 64, 95);
3749
3750 return false;
3751 }
3752
3753};
3754
3755}
3756
3757void AArch64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3758 int FreeIntRegs = 8, FreeVFPRegs = 8;
3759
3760 FI.getReturnInfo() = classifyGenericType(FI.getReturnType(),
3761 FreeIntRegs, FreeVFPRegs);
3762
3763 FreeIntRegs = FreeVFPRegs = 8;
3764 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3765 it != ie; ++it) {
3766 it->info = classifyGenericType(it->type, FreeIntRegs, FreeVFPRegs);
3767
3768 }
3769}
3770
3771ABIArgInfo
3772AArch64ABIInfo::tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded,
3773 bool IsInt, llvm::Type *DirectTy) const {
3774 if (FreeRegs >= RegsNeeded) {
3775 FreeRegs -= RegsNeeded;
3776 return ABIArgInfo::getDirect(DirectTy);
3777 }
3778
3779 llvm::Type *Padding = 0;
3780
3781 // We need padding so that later arguments don't get filled in anyway. That
3782 // wouldn't happen if only ByVal arguments followed in the same category, but
3783 // a large structure will simply seem to be a pointer as far as LLVM is
3784 // concerned.
3785 if (FreeRegs > 0) {
3786 if (IsInt)
3787 Padding = llvm::Type::getInt64Ty(getVMContext());
3788 else
3789 Padding = llvm::Type::getFloatTy(getVMContext());
3790
3791 // Either [N x i64] or [N x float].
3792 Padding = llvm::ArrayType::get(Padding, FreeRegs);
3793 FreeRegs = 0;
3794 }
3795
3796 return ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty) / 8,
3797 /*IsByVal=*/ true, /*Realign=*/ false,
3798 Padding);
3799}
3800
3801
3802ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
3803 int &FreeIntRegs,
3804 int &FreeVFPRegs) const {
3805 // Can only occur for return, but harmless otherwise.
3806 if (Ty->isVoidType())
3807 return ABIArgInfo::getIgnore();
3808
3809 // Large vector types should be returned via memory. There's no such concept
3810 // in the ABI, but they'd be over 16 bytes anyway so no matter how they're
3811 // classified they'd go into memory (see B.3).
3812 if (Ty->isVectorType() && getContext().getTypeSize(Ty) > 128) {
3813 if (FreeIntRegs > 0)
3814 --FreeIntRegs;
3815 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3816 }
3817
3818 // All non-aggregate LLVM types have a concrete ABI representation so they can
3819 // be passed directly. After this block we're guaranteed to be in a
3820 // complicated case.
3821 if (!isAggregateTypeForABI(Ty)) {
3822 // Treat an enum type as its underlying type.
3823 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3824 Ty = EnumTy->getDecl()->getIntegerType();
3825
3826 if (Ty->isFloatingType() || Ty->isVectorType())
3827 return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ false);
3828
3829 assert(getContext().getTypeSize(Ty) <= 128 &&
3830 "unexpectedly large scalar type");
3831
3832 int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
3833
3834 // If the type may need padding registers to ensure "alignment", we must be
3835 // careful when this is accounted for. Increasing the effective size covers
3836 // all cases.
3837 if (getContext().getTypeAlign(Ty) == 128)
3838 RegsNeeded += FreeIntRegs % 2 != 0;
3839
3840 return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true);
3841 }
3842
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00003843 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
3844 if (FreeIntRegs > 0 && RAA == CGCXXABI::RAA_Indirect)
Tim Northoverc264e162013-01-31 12:13:10 +00003845 --FreeIntRegs;
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00003846 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Tim Northoverc264e162013-01-31 12:13:10 +00003847 }
3848
3849 if (isEmptyRecord(getContext(), Ty, true)) {
3850 if (!getContext().getLangOpts().CPlusPlus) {
3851 // Empty structs outside C++ mode are a GNU extension, so no ABI can
3852 // possibly tell us what to do. It turns out (I believe) that GCC ignores
3853 // the object for parameter-passing purposes.
3854 return ABIArgInfo::getIgnore();
3855 }
3856
3857 // The combination of C++98 9p5 (sizeof(struct) != 0) and the pseudocode
3858 // description of va_arg in the PCS requires that an empty struct does
3859 // actually occupy space for parameter-passing. I'm hoping for a
3860 // clarification giving an explicit paragraph to point to in future.
3861 return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ true,
3862 llvm::Type::getInt8Ty(getVMContext()));
3863 }
3864
3865 // Homogeneous vector aggregates get passed in registers or on the stack.
3866 const Type *Base = 0;
3867 uint64_t NumMembers = 0;
3868 if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)) {
3869 assert(Base && "Base class should be set for homogeneous aggregate");
3870 // Homogeneous aggregates are passed and returned directly.
3871 return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ NumMembers,
3872 /*IsInt=*/ false);
3873 }
3874
3875 uint64_t Size = getContext().getTypeSize(Ty);
3876 if (Size <= 128) {
3877 // Small structs can use the same direct type whether they're in registers
3878 // or on the stack.
3879 llvm::Type *BaseTy;
3880 unsigned NumBases;
3881 int SizeInRegs = (Size + 63) / 64;
3882
3883 if (getContext().getTypeAlign(Ty) == 128) {
3884 BaseTy = llvm::Type::getIntNTy(getVMContext(), 128);
3885 NumBases = 1;
3886
3887 // If the type may need padding registers to ensure "alignment", we must
3888 // be careful when this is accounted for. Increasing the effective size
3889 // covers all cases.
3890 SizeInRegs += FreeIntRegs % 2 != 0;
3891 } else {
3892 BaseTy = llvm::Type::getInt64Ty(getVMContext());
3893 NumBases = SizeInRegs;
3894 }
3895 llvm::Type *DirectTy = llvm::ArrayType::get(BaseTy, NumBases);
3896
3897 return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ SizeInRegs,
3898 /*IsInt=*/ true, DirectTy);
3899 }
3900
3901 // If the aggregate is > 16 bytes, it's passed and returned indirectly. In
3902 // LLVM terms the return uses an "sret" pointer, but that's handled elsewhere.
3903 --FreeIntRegs;
3904 return ABIArgInfo::getIndirect(0, /* byVal = */ false);
3905}
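
// Editor's example (not part of the original file, AAPCS64 assumed): with all
// eight integer and eight FP registers free, "struct { double a, b; }" is a
// homogeneous aggregate and is passed directly in two FP registers;
// "struct { long a, b; }" (16 bytes) is coerced to [2 x i64] and uses two
// integer registers; "struct { long a, b, c; }" (24 bytes) exceeds 16 bytes,
// so it is passed indirectly and costs one integer register for the pointer.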
3906
3907llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3908 CodeGenFunction &CGF) const {
3909 // The AArch64 va_list type and handling is specified in the Procedure Call
3910 // Standard, section B.4:
3911 //
3912 // struct {
3913 // void *__stack;
3914 // void *__gr_top;
3915 // void *__vr_top;
3916 // int __gr_offs;
3917 // int __vr_offs;
3918 // };
3919
3920 assert(!CGF.CGM.getDataLayout().isBigEndian()
3921 && "va_arg not implemented for big-endian AArch64");
3922
3923 int FreeIntRegs = 8, FreeVFPRegs = 8;
3924 Ty = CGF.getContext().getCanonicalType(Ty);
3925 ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs);
3926
3927 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
3928 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3929 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
3930 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3931
3932 llvm::Value *reg_offs_p = 0, *reg_offs = 0;
3933 int reg_top_index;
3934 int RegSize;
3935 if (FreeIntRegs < 8) {
3936 assert(FreeVFPRegs == 8 && "Arguments never split between int & VFP regs");
3937 // 3 is the field number of __gr_offs
3938 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
3939 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
3940 reg_top_index = 1; // field number for __gr_top
3941 RegSize = 8 * (8 - FreeIntRegs);
3942 } else {
3943 assert(FreeVFPRegs < 8 && "Argument must go in VFP or int regs");
3944 // 4 is the field number of __vr_offs.
3945 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
3946 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
3947 reg_top_index = 2; // field number for __vr_top
3948 RegSize = 16 * (8 - FreeVFPRegs);
3949 }
3950
3951 //=======================================
3952 // Find out where argument was passed
3953 //=======================================
3954
3955 // If reg_offs >= 0 we're already using the stack for this type of
3956 // argument. We don't want to keep updating reg_offs (in case it overflows,
3957 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
3958 // whatever they get).
3959 llvm::Value *UsingStack = 0;
3960 UsingStack = CGF.Builder.CreateICmpSGE(reg_offs,
3961 llvm::ConstantInt::get(CGF.Int32Ty, 0));
3962
3963 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
3964
3965 // Otherwise, at least some kind of argument could go in these registers, the
3966 // question is whether this particular type is too big.
3967 CGF.EmitBlock(MaybeRegBlock);
3968
3969 // Integer arguments may need their register alignment corrected (for example a
3970 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
3971 // align __gr_offs to calculate the potential address.
3972 if (FreeIntRegs < 8 && AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
3973 int Align = getContext().getTypeAlign(Ty) / 8;
3974
3975 reg_offs = CGF.Builder.CreateAdd(reg_offs,
3976 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
3977 "align_regoffs");
3978 reg_offs = CGF.Builder.CreateAnd(reg_offs,
3979 llvm::ConstantInt::get(CGF.Int32Ty, -Align),
3980 "aligned_regoffs");
3981 }
3982
3983 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
3984 llvm::Value *NewOffset = 0;
3985 NewOffset = CGF.Builder.CreateAdd(reg_offs,
3986 llvm::ConstantInt::get(CGF.Int32Ty, RegSize),
3987 "new_reg_offs");
3988 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
3989
3990 // Now we're in a position to decide whether this argument really was in
3991 // registers or not.
3992 llvm::Value *InRegs = 0;
3993 InRegs = CGF.Builder.CreateICmpSLE(NewOffset,
3994 llvm::ConstantInt::get(CGF.Int32Ty, 0),
3995 "inreg");
3996
3997 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
3998
3999 //=======================================
4000 // Argument was in registers
4001 //=======================================
4002
4003 // Now we emit the code for if the argument was originally passed in
4004 // registers. First start the appropriate block:
4005 CGF.EmitBlock(InRegBlock);
4006
4007 llvm::Value *reg_top_p = 0, *reg_top = 0;
4008 reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
4009 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4010 llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
4011 llvm::Value *RegAddr = 0;
4012 llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4013
4014 if (!AI.isDirect()) {
4015 // If it's been passed indirectly (actually a struct), whatever we find from
4016 // stored registers or on the stack will actually be a struct **.
4017 MemTy = llvm::PointerType::getUnqual(MemTy);
4018 }
4019
4020 const Type *Base = 0;
4021 uint64_t NumMembers;
4022 if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)
4023 && NumMembers > 1) {
4024 // Homogeneous aggregates passed in registers will have their elements split
4025 // and stored 16 bytes apart regardless of size (they're notionally in qN,
4026 // qN+1, ...). We reload and store into a temporary local variable
4027 // contiguously.
4028 assert(AI.isDirect() && "Homogeneous aggregates should be passed directly");
4029 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4030 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4031 llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
4032
4033 for (unsigned i = 0; i < NumMembers; ++i) {
4034 llvm::Value *BaseOffset = llvm::ConstantInt::get(CGF.Int32Ty, 16 * i);
4035 llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
4036 LoadAddr = CGF.Builder.CreateBitCast(LoadAddr,
4037 llvm::PointerType::getUnqual(BaseTy));
4038 llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
4039
4040 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4041 CGF.Builder.CreateStore(Elem, StoreAddr);
4042 }
4043
4044 RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
4045 } else {
4046 // Otherwise the object is contiguous in memory
4047 RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
4048 }
4049
4050 CGF.EmitBranch(ContBlock);
4051
4052 //=======================================
4053 // Argument was on the stack
4054 //=======================================
4055 CGF.EmitBlock(OnStackBlock);
4056
4057 llvm::Value *stack_p = 0, *OnStackAddr = 0;
4058 stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
4059 OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
4060
4061 // Again, stack arguments may need realignment. In this case both integer and
4062 // floating-point ones might be affected.
4063 if (AI.isDirect() && getContext().getTypeAlign(Ty) > 64) {
4064 int Align = getContext().getTypeAlign(Ty) / 8;
4065
4066 OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4067
4068 OnStackAddr = CGF.Builder.CreateAdd(OnStackAddr,
4069 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4070 "align_stack");
4071 OnStackAddr = CGF.Builder.CreateAnd(OnStackAddr,
4072 llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4073 "align_stack");
4074
4075 OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4076 }
4077
4078 uint64_t StackSize;
4079 if (AI.isDirect())
4080 StackSize = getContext().getTypeSize(Ty) / 8;
4081 else
4082 StackSize = 8;
4083
4084 // All stack slots are 8 bytes
4085 StackSize = llvm::RoundUpToAlignment(StackSize, 8);
4086
4087 llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
4088 llvm::Value *NewStack = CGF.Builder.CreateGEP(OnStackAddr, StackSizeC,
4089 "new_stack");
4090
4091 // Write the new value of __stack for the next call to va_arg
4092 CGF.Builder.CreateStore(NewStack, stack_p);
4093
4094 OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
4095
4096 CGF.EmitBranch(ContBlock);
4097
4098 //=======================================
4099 // Tidy up
4100 //=======================================
4101 CGF.EmitBlock(ContBlock);
4102
4103 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
4104 ResAddr->addIncoming(RegAddr, InRegBlock);
4105 ResAddr->addIncoming(OnStackAddr, OnStackBlock);
4106
4107 if (AI.isDirect())
4108 return ResAddr;
4109
4110 return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
4111}
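
// Editor's sketch of the emitted control flow (not part of the original
// file): for va_arg(ap, int) the code above loads __gr_offs, branches
// straight to the stack path if it is already non-negative, otherwise adds 8
// (one GPR slot) and stores the new offset back; if the new offset is still
// <= 0 the argument is read from __gr_top plus the old __gr_offs, and the
// final PHI merges the register and stack addresses.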
4112
4113//===----------------------------------------------------------------------===//
Justin Holewinski2c585b92012-05-24 17:43:12 +00004114// NVPTX ABI Implementation
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004115//===----------------------------------------------------------------------===//
4116
4117namespace {
4118
Justin Holewinski2c585b92012-05-24 17:43:12 +00004119class NVPTXABIInfo : public ABIInfo {
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004120public:
Justin Holewinskidca8f332013-03-30 14:38:24 +00004121 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004122
4123 ABIArgInfo classifyReturnType(QualType RetTy) const;
4124 ABIArgInfo classifyArgumentType(QualType Ty) const;
4125
4126 virtual void computeInfo(CGFunctionInfo &FI) const;
4127 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4128 CodeGenFunction &CFG) const;
4129};
4130
Justin Holewinski2c585b92012-05-24 17:43:12 +00004131class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004132public:
Justin Holewinski2c585b92012-05-24 17:43:12 +00004133 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
4134 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
Justin Holewinski818eafb2011-10-05 17:58:44 +00004135
Peter Collingbourne2f7aa992011-10-13 16:24:41 +00004136 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4137 CodeGen::CodeGenModule &M) const;
Justin Holewinskidca8f332013-03-30 14:38:24 +00004138private:
4139 static void addKernelMetadata(llvm::Function *F);
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004140};
4141
Justin Holewinski2c585b92012-05-24 17:43:12 +00004142ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004143 if (RetTy->isVoidType())
4144 return ABIArgInfo::getIgnore();
4145 if (isAggregateTypeForABI(RetTy))
4146 return ABIArgInfo::getIndirect(0);
4147 return ABIArgInfo::getDirect();
4148}
4149
Justin Holewinski2c585b92012-05-24 17:43:12 +00004150ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004151 if (isAggregateTypeForABI(Ty))
4152 return ABIArgInfo::getIndirect(0);
4153
4154 return ABIArgInfo::getDirect();
4155}
4156
Justin Holewinski2c585b92012-05-24 17:43:12 +00004157void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004158 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4159 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4160 it != ie; ++it)
4161 it->info = classifyArgumentType(it->type);
4162
4163 // Always honor user-specified calling convention.
4164 if (FI.getCallingConvention() != llvm::CallingConv::C)
4165 return;
4166
John McCallbd7370a2013-02-28 19:01:20 +00004167 FI.setEffectiveCallingConvention(getRuntimeCC());
4168}
4169
Justin Holewinski2c585b92012-05-24 17:43:12 +00004170llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4171 CodeGenFunction &CFG) const {
4172 llvm_unreachable("NVPTX does not support varargs");
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004173}
4174
Justin Holewinski2c585b92012-05-24 17:43:12 +00004175void NVPTXTargetCodeGenInfo::
4176SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4177 CodeGen::CodeGenModule &M) const{
Justin Holewinski818eafb2011-10-05 17:58:44 +00004178 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4179 if (!FD) return;
4180
4181 llvm::Function *F = cast<llvm::Function>(GV);
4182
4183 // Perform special handling in OpenCL mode
David Blaikie4e4d0842012-03-11 07:00:24 +00004184 if (M.getLangOpts().OpenCL) {
Justin Holewinskidca8f332013-03-30 14:38:24 +00004185 // Use OpenCL function attributes to check for kernel functions
Justin Holewinski818eafb2011-10-05 17:58:44 +00004186 // By default, all functions are device functions
Justin Holewinski818eafb2011-10-05 17:58:44 +00004187 if (FD->hasAttr<OpenCLKernelAttr>()) {
Justin Holewinskidca8f332013-03-30 14:38:24 +00004188 // OpenCL __kernel functions get kernel metadata
4189 addKernelMetadata(F);
Justin Holewinski818eafb2011-10-05 17:58:44 +00004190 // And kernel functions are not subject to inlining
Bill Wendling72390b32012-12-20 19:27:06 +00004191 F->addFnAttr(llvm::Attribute::NoInline);
Justin Holewinski818eafb2011-10-05 17:58:44 +00004192 }
Peter Collingbourne744d90b2011-10-06 16:49:54 +00004193 }
Justin Holewinski818eafb2011-10-05 17:58:44 +00004194
Peter Collingbourne744d90b2011-10-06 16:49:54 +00004195 // Perform special handling in CUDA mode.
David Blaikie4e4d0842012-03-11 07:00:24 +00004196 if (M.getLangOpts().CUDA) {
Justin Holewinskidca8f332013-03-30 14:38:24 +00004197 // CUDA __global__ functions get a kernel metadata entry. Since
Peter Collingbourne744d90b2011-10-06 16:49:54 +00004198 // __global__ functions cannot be called from the device, we do not
4199 // need to set the noinline attribute.
4200 if (FD->getAttr<CUDAGlobalAttr>())
Justin Holewinskidca8f332013-03-30 14:38:24 +00004201 addKernelMetadata(F);
Justin Holewinski818eafb2011-10-05 17:58:44 +00004202 }
4203}
4204
Justin Holewinskidca8f332013-03-30 14:38:24 +00004205void NVPTXTargetCodeGenInfo::addKernelMetadata(llvm::Function *F) {
4206 llvm::Module *M = F->getParent();
4207 llvm::LLVMContext &Ctx = M->getContext();
4208
4209 // Get "nvvm.annotations" metadata node
4210 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
4211
4212 // Create !{<func-ref>, metadata !"kernel", i32 1} node
4213 llvm::SmallVector<llvm::Value *, 3> MDVals;
4214 MDVals.push_back(F);
4215 MDVals.push_back(llvm::MDString::get(Ctx, "kernel"));
4216 MDVals.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1));
4217
4218 // Append metadata to nvvm.annotations
4219 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
4220}
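
// Editor's note (not part of the original file): for a kernel "foo" this adds
// a module-level entry that prints roughly as
//   !nvvm.annotations = !{!0}
//   !0 = metadata !{void ()* @foo, metadata !"kernel", i32 1}
// in the textual IR syntax of the LLVM version this code targets.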
4221
Justin Holewinski0259c3a2011-04-22 11:10:38 +00004222}
4223
4224//===----------------------------------------------------------------------===//
Ulrich Weigandb8409212013-05-06 16:26:41 +00004225// SystemZ ABI Implementation
4226//===----------------------------------------------------------------------===//
4227
4228namespace {
4229
4230class SystemZABIInfo : public ABIInfo {
4231public:
4232 SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4233
4234 bool isPromotableIntegerType(QualType Ty) const;
4235 bool isCompoundType(QualType Ty) const;
4236 bool isFPArgumentType(QualType Ty) const;
4237
4238 ABIArgInfo classifyReturnType(QualType RetTy) const;
4239 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
4240
4241 virtual void computeInfo(CGFunctionInfo &FI) const {
4242 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4243 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4244 it != ie; ++it)
4245 it->info = classifyArgumentType(it->type);
4246 }
4247
4248 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4249 CodeGenFunction &CGF) const;
4250};
4251
4252class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
4253public:
4254 SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
4255 : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
4256};
4257
4258}
4259
4260bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
4261 // Treat an enum type as its underlying type.
4262 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4263 Ty = EnumTy->getDecl()->getIntegerType();
4264
4265 // Promotable integer types are required to be promoted by the ABI.
4266 if (Ty->isPromotableIntegerType())
4267 return true;
4268
4269 // 32-bit values must also be promoted.
4270 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4271 switch (BT->getKind()) {
4272 case BuiltinType::Int:
4273 case BuiltinType::UInt:
4274 return true;
4275 default:
4276 return false;
4277 }
4278 return false;
4279}
4280
4281bool SystemZABIInfo::isCompoundType(QualType Ty) const {
4282 return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
4283}
4284
4285bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
4286 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4287 switch (BT->getKind()) {
4288 case BuiltinType::Float:
4289 case BuiltinType::Double:
4290 return true;
4291 default:
4292 return false;
4293 }
4294
4295 if (const RecordType *RT = Ty->getAsStructureType()) {
4296 const RecordDecl *RD = RT->getDecl();
4297 bool Found = false;
4298
4299 // If this is a C++ record, check the bases first.
4300 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
4301 for (CXXRecordDecl::base_class_const_iterator I = CXXRD->bases_begin(),
4302 E = CXXRD->bases_end(); I != E; ++I) {
4303 QualType Base = I->getType();
4304
4305 // Empty bases don't affect things either way.
4306 if (isEmptyRecord(getContext(), Base, true))
4307 continue;
4308
4309 if (Found)
4310 return false;
4311 Found = isFPArgumentType(Base);
4312 if (!Found)
4313 return false;
4314 }
4315
4316 // Check the fields.
4317 for (RecordDecl::field_iterator I = RD->field_begin(),
4318 E = RD->field_end(); I != E; ++I) {
4319 const FieldDecl *FD = *I;
4320
4321 // Empty bitfields don't affect things either way.
4322 // Unlike isSingleElementStruct(), empty structure and array fields
4323 // do count. So do anonymous bitfields that aren't zero-sized.
4324 if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4325 continue;
4326
4327 // Unlike isSingleElementStruct(), arrays do not count.
4328 // Nested isFPArgumentType structures still do though.
4329 if (Found)
4330 return false;
4331 Found = isFPArgumentType(FD->getType());
4332 if (!Found)
4333 return false;
4334 }
4335
4336 // Unlike isSingleElementStruct(), trailing padding is allowed.
4337 // An 8-byte aligned struct s { float f; } is passed as a double.
4338 return Found;
4339 }
4340
4341 return false;
4342}
4343
4344llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4345 CodeGenFunction &CGF) const {
4346 // Assume that va_list type is correct; should be pointer to LLVM type:
4347 // struct {
4348 // i64 __gpr;
4349 // i64 __fpr;
4350 // i8 *__overflow_arg_area;
4351 // i8 *__reg_save_area;
4352 // };
4353
4354 // Every argument occupies 8 bytes and is passed by preference in either
4355 // GPRs or FPRs.
4356 Ty = CGF.getContext().getCanonicalType(Ty);
4357 ABIArgInfo AI = classifyArgumentType(Ty);
4358 bool InFPRs = isFPArgumentType(Ty);
4359
4360 llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4361 bool IsIndirect = AI.isIndirect();
4362 unsigned UnpaddedBitSize;
4363 if (IsIndirect) {
4364 APTy = llvm::PointerType::getUnqual(APTy);
4365 UnpaddedBitSize = 64;
4366 } else
4367 UnpaddedBitSize = getContext().getTypeSize(Ty);
4368 unsigned PaddedBitSize = 64;
4369 assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
4370
4371 unsigned PaddedSize = PaddedBitSize / 8;
4372 unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
4373
4374 unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
4375 if (InFPRs) {
4376 MaxRegs = 4; // Maximum of 4 FPR arguments
4377 RegCountField = 1; // __fpr
4378 RegSaveIndex = 16; // save offset for f0
4379 RegPadding = 0; // floats are passed in the high bits of an FPR
4380 } else {
4381 MaxRegs = 5; // Maximum of 5 GPR arguments
4382 RegCountField = 0; // __gpr
4383 RegSaveIndex = 2; // save offset for r2
4384 RegPadding = Padding; // values are passed in the low bits of a GPR
4385 }
4386
4387 llvm::Value *RegCountPtr =
4388 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
4389 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
4390 llvm::Type *IndexTy = RegCount->getType();
4391 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
4392 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
4393 "fits_in_regs");
4394
4395 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4396 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4397 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4398 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4399
4400 // Emit code to load the value if it was passed in registers.
4401 CGF.EmitBlock(InRegBlock);
4402
4403 // Work out the address of an argument register.
4404 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
4405 llvm::Value *ScaledRegCount =
4406 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
4407 llvm::Value *RegBase =
4408 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
4409 llvm::Value *RegOffset =
4410 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
4411 llvm::Value *RegSaveAreaPtr =
4412 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
4413 llvm::Value *RegSaveArea =
4414 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
4415 llvm::Value *RawRegAddr =
4416 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
4417 llvm::Value *RegAddr =
4418 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
4419
4420 // Update the register count
4421 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
4422 llvm::Value *NewRegCount =
4423 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
4424 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
4425 CGF.EmitBranch(ContBlock);
4426
4427 // Emit code to load the value if it was passed in memory.
4428 CGF.EmitBlock(InMemBlock);
4429
4430 // Work out the address of a stack argument.
4431 llvm::Value *OverflowArgAreaPtr =
4432 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
4433 llvm::Value *OverflowArgArea =
4434 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
4435 llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
4436 llvm::Value *RawMemAddr =
4437 CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
4438 llvm::Value *MemAddr =
4439 CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
4440
4441 // Update overflow_arg_area_ptr pointer
4442 llvm::Value *NewOverflowArgArea =
4443 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
4444 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
4445 CGF.EmitBranch(ContBlock);
4446
4447 // Return the appropriate result.
4448 CGF.EmitBlock(ContBlock);
4449 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
4450 ResAddr->addIncoming(RegAddr, InRegBlock);
4451 ResAddr->addIncoming(MemAddr, InMemBlock);
4452
4453 if (IsIndirect)
4454 return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
4455
4456 return ResAddr;
4457}
4458
John McCallb8b52972013-06-18 02:46:29 +00004459bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
4460 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4461 assert(Triple.getArch() == llvm::Triple::x86);
4462
4463 switch (Opts.getStructReturnConvention()) {
4464 case CodeGenOptions::SRCK_Default:
4465 break;
4466 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
4467 return false;
4468 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
4469 return true;
4470 }
4471
4472 if (Triple.isOSDarwin())
4473 return true;
4474
4475 switch (Triple.getOS()) {
4476 case llvm::Triple::Cygwin:
4477 case llvm::Triple::MinGW32:
4478 case llvm::Triple::AuroraUX:
4479 case llvm::Triple::DragonFly:
4480 case llvm::Triple::FreeBSD:
4481 case llvm::Triple::OpenBSD:
4482 case llvm::Triple::Bitrig:
4483 case llvm::Triple::Win32:
4484 return true;
4485 default:
4486 return false;
4487 }
4488}
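// Illustrative effect of the decision above (sketch, not exhaustive):
//
//   struct S { int x; };
//   struct S f(void);
//
// When isStructReturnInRegABI() returns true (Darwin, the BSDs, Win32, or
// -freg-struct-return), a small struct like S is returned in registers;
// when it returns false (other OSes, or -fpcc-struct-return), the caller
// passes a hidden sret pointer instead.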
Ulrich Weigandb8409212013-05-06 16:26:41 +00004489
4490ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
4491 if (RetTy->isVoidType())
4492 return ABIArgInfo::getIgnore();
4493 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
4494 return ABIArgInfo::getIndirect(0);
4495 return (isPromotableIntegerType(RetTy) ?
4496 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4497}
4498
4499ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
4500 // Handle the generic C++ ABI.
4501 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
4502 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
4503
4504 // Integers and enums are extended to full register width.
4505 if (isPromotableIntegerType(Ty))
4506 return ABIArgInfo::getExtend();
4507
4508 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
4509 uint64_t Size = getContext().getTypeSize(Ty);
4510 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
4511 return ABIArgInfo::getIndirect(0);
4512
4513 // Handle small structures.
4514 if (const RecordType *RT = Ty->getAs<RecordType>()) {
4515    // Structures with flexible arrays have variable length, so they really
4516 // fail the size test above.
4517 const RecordDecl *RD = RT->getDecl();
4518 if (RD->hasFlexibleArrayMember())
4519 return ABIArgInfo::getIndirect(0);
4520
4521 // The structure is passed as an unextended integer, a float, or a double.
4522 llvm::Type *PassTy;
4523 if (isFPArgumentType(Ty)) {
4524 assert(Size == 32 || Size == 64);
4525 if (Size == 32)
4526 PassTy = llvm::Type::getFloatTy(getVMContext());
4527 else
4528 PassTy = llvm::Type::getDoubleTy(getVMContext());
4529 } else
4530 PassTy = llvm::IntegerType::get(getVMContext(), Size);
4531 return ABIArgInfo::getDirect(PassTy);
4532 }
4533
4534 // Non-structure compounds are passed indirectly.
4535 if (isCompoundType(Ty))
4536 return ABIArgInfo::getIndirect(0);
4537
4538 return ABIArgInfo::getDirect(0);
4539}
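// Illustrative classifications under the rules above (sketch; assumes
// isFPArgumentType(), defined earlier in this file, accepts a struct
// wrapping a single float or double):
//
//   struct A { float f; };         // 4 bytes  -> direct as float
//   struct B { double d; };        // 8 bytes  -> direct as double
//   struct C { short s; char c; }; // 4 bytes  -> direct as i32
//   struct D { char c[3]; };       // 3 bytes  -> indirect (not 1, 2, 4 or 8 bytes)
//   struct E { char c[16]; };      // 16 bytes -> indirect (not 1, 2, 4 or 8 bytes)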
4540
4541//===----------------------------------------------------------------------===//
Wesley Peck276fdf42010-12-19 19:57:51 +00004542// MBlaze ABI Implementation
4543//===----------------------------------------------------------------------===//
4544
4545namespace {
4546
4547class MBlazeABIInfo : public ABIInfo {
4548public:
4549 MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4550
4551 bool isPromotableIntegerType(QualType Ty) const;
4552
4553 ABIArgInfo classifyReturnType(QualType RetTy) const;
4554 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4555
4556 virtual void computeInfo(CGFunctionInfo &FI) const {
4557 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4558 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4559 it != ie; ++it)
4560 it->info = classifyArgumentType(it->type);
4561 }
4562
4563 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4564 CodeGenFunction &CGF) const;
4565};
4566
4567class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
4568public:
4569 MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
4570 : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
4571 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4572 CodeGen::CodeGenModule &M) const;
4573};
4574
4575}
4576
4577bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
4578 // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
4579 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4580 switch (BT->getKind()) {
4581 case BuiltinType::Bool:
4582 case BuiltinType::Char_S:
4583 case BuiltinType::Char_U:
4584 case BuiltinType::SChar:
4585 case BuiltinType::UChar:
4586 case BuiltinType::Short:
4587 case BuiltinType::UShort:
4588 return true;
4589 default:
4590 return false;
4591 }
4592 return false;
4593}
4594
4595llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4596 CodeGenFunction &CGF) const {
4597 // FIXME: Implement
4598 return 0;
4599}
4600
4601
4602ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
4603 if (RetTy->isVoidType())
4604 return ABIArgInfo::getIgnore();
4605 if (isAggregateTypeForABI(RetTy))
4606 return ABIArgInfo::getIndirect(0);
4607
4608 return (isPromotableIntegerType(RetTy) ?
4609 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4610}
4611
4612ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
4613 if (isAggregateTypeForABI(Ty))
4614 return ABIArgInfo::getIndirect(0);
4615
4616 return (isPromotableIntegerType(Ty) ?
4617 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4618}
4619
4620void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
4621 llvm::GlobalValue *GV,
4622 CodeGen::CodeGenModule &M)
4623 const {
4624 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4625 if (!FD) return;
NAKAMURA Takumi125b4cb2011-02-17 08:50:50 +00004626
Wesley Peck276fdf42010-12-19 19:57:51 +00004627 llvm::CallingConv::ID CC = llvm::CallingConv::C;
4628 if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
4629 CC = llvm::CallingConv::MBLAZE_INTR;
4630 else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
4631 CC = llvm::CallingConv::MBLAZE_SVOL;
4632
4633 if (CC != llvm::CallingConv::C) {
4634 // Handle 'interrupt_handler' attribute:
4635 llvm::Function *F = cast<llvm::Function>(GV);
4636
4637 // Step 1: Set ISR calling convention.
4638 F->setCallingConv(CC);
4639
4640    // Step 2: Add the desired function attributes.
Bill Wendling72390b32012-12-20 19:27:06 +00004641 F->addFnAttr(llvm::Attribute::NoInline);
Wesley Peck276fdf42010-12-19 19:57:51 +00004642 }
4643
4644 // Step 3: Emit _interrupt_handler alias.
4645 if (CC == llvm::CallingConv::MBLAZE_INTR)
4646 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
4647 "_interrupt_handler", GV, &M.getModule());
4648}
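// Usage sketch (illustrative; assumes the GNU-style attribute spellings):
//
//   void isr(void) __attribute__((interrupt_handler));
//
// Such a function receives the MBLAZE_INTR calling convention, is marked
// noinline, and gets an "_interrupt_handler" alias, following the three
// steps above; save_volatiles works the same way but uses MBLAZE_SVOL and
// emits no alias.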
4649
4650
4651//===----------------------------------------------------------------------===//
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00004652// MSP430 ABI Implementation
Chris Lattnerdce5ad02010-06-28 20:05:43 +00004653//===----------------------------------------------------------------------===//
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00004654
4655namespace {
4656
4657class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
4658public:
Chris Lattnerea044322010-07-29 02:01:43 +00004659 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
4660 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00004661 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4662 CodeGen::CodeGenModule &M) const;
4663};
4664
4665}
4666
4667void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
4668 llvm::GlobalValue *GV,
4669 CodeGen::CodeGenModule &M) const {
4670 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
4671 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
4672 // Handle 'interrupt' attribute:
4673 llvm::Function *F = cast<llvm::Function>(GV);
4674
4675 // Step 1: Set ISR calling convention.
4676 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
4677
4678      // Step 2: Add the desired function attributes.
Bill Wendling72390b32012-12-20 19:27:06 +00004679 F->addFnAttr(llvm::Attribute::NoInline);
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00004680
4681 // Step 3: Emit ISR vector alias.
Anton Korobeynikovf419a852012-11-26 18:59:10 +00004682 unsigned Num = attr->getNumber() / 2;
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00004683 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
Anton Korobeynikovf419a852012-11-26 18:59:10 +00004684 "__isr_" + Twine(Num),
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00004685 GV, &M.getModule());
4686 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00004687 }
4688}
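// Worked example (illustrative; assumes the usual attribute spelling):
//
//   void handler(void) __attribute__((interrupt(12)));
//
// Here getNumber() is 12, Num is 12 / 2 == 6, and the vector alias emitted
// above is "__isr_6", pointing at the handler.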
4689
Chris Lattnerdce5ad02010-06-28 20:05:43 +00004690//===----------------------------------------------------------------------===//
John McCallaeeb7012010-05-27 06:19:26 +00004691// MIPS ABI Implementation. This works for both little-endian and
4692// big-endian variants.
Chris Lattnerdce5ad02010-06-28 20:05:43 +00004693//===----------------------------------------------------------------------===//
4694
John McCallaeeb7012010-05-27 06:19:26 +00004695namespace {
Akira Hatanaka619e8872011-06-02 00:09:17 +00004696class MipsABIInfo : public ABIInfo {
Akira Hatanakac0e3b662011-11-02 23:14:57 +00004697 bool IsO32;
Akira Hatanakac359f202012-07-03 19:24:06 +00004698 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
4699 void CoerceToIntArgs(uint64_t TySize,
Craig Topper6b9240e2013-07-05 19:34:19 +00004700 SmallVectorImpl<llvm::Type *> &ArgList) const;
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004701 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004702 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
Akira Hatanakaa33fd392012-01-09 19:31:25 +00004703 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
Akira Hatanaka619e8872011-06-02 00:09:17 +00004704public:
Akira Hatanakab551dd32011-11-03 00:05:50 +00004705 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
Akira Hatanakac359f202012-07-03 19:24:06 +00004706 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
4707 StackAlignInBytes(IsO32 ? 8 : 16) {}
Akira Hatanaka619e8872011-06-02 00:09:17 +00004708
4709 ABIArgInfo classifyReturnType(QualType RetTy) const;
Akira Hatanakaf0cc2082012-01-07 00:25:33 +00004710 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
Akira Hatanaka619e8872011-06-02 00:09:17 +00004711 virtual void computeInfo(CGFunctionInfo &FI) const;
4712 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4713 CodeGenFunction &CGF) const;
4714};
4715
John McCallaeeb7012010-05-27 06:19:26 +00004716class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
Akira Hatanakae624fa02011-09-20 18:23:28 +00004717 unsigned SizeOfUnwindException;
John McCallaeeb7012010-05-27 06:19:26 +00004718public:
Akira Hatanakac0e3b662011-11-02 23:14:57 +00004719 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
4720 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
4721 SizeOfUnwindException(IsO32 ? 24 : 32) {}
John McCallaeeb7012010-05-27 06:19:26 +00004722
4723 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
4724 return 29;
4725 }
4726
Reed Kotler7dfd1822013-01-16 17:10:28 +00004727 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4728 CodeGen::CodeGenModule &CGM) const {
Reed Kotlerad4b8b42013-03-13 20:40:30 +00004729 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4730 if (!FD) return;
Rafael Espindolad8e6d6d2013-03-19 14:32:23 +00004731 llvm::Function *Fn = cast<llvm::Function>(GV);
Reed Kotlerad4b8b42013-03-13 20:40:30 +00004732 if (FD->hasAttr<Mips16Attr>()) {
4733 Fn->addFnAttr("mips16");
4734 }
4735 else if (FD->hasAttr<NoMips16Attr>()) {
4736 Fn->addFnAttr("nomips16");
4737 }
Reed Kotler7dfd1822013-01-16 17:10:28 +00004738 }
Reed Kotlerad4b8b42013-03-13 20:40:30 +00004739
John McCallaeeb7012010-05-27 06:19:26 +00004740 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Michael J. Spencer8bea82f2010-08-25 18:17:27 +00004741 llvm::Value *Address) const;
John McCall49e34be2011-08-30 01:42:09 +00004742
4743 unsigned getSizeOfUnwindException() const {
Akira Hatanakae624fa02011-09-20 18:23:28 +00004744 return SizeOfUnwindException;
John McCall49e34be2011-08-30 01:42:09 +00004745 }
John McCallaeeb7012010-05-27 06:19:26 +00004746};
4747}
4748
Akira Hatanakac359f202012-07-03 19:24:06 +00004749void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
Craig Topper6b9240e2013-07-05 19:34:19 +00004750 SmallVectorImpl<llvm::Type *> &ArgList) const {
Akira Hatanakac359f202012-07-03 19:24:06 +00004751 llvm::IntegerType *IntTy =
4752 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004753
4754  // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy.
4755 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
4756 ArgList.push_back(IntTy);
4757
4758 // If necessary, add one more integer type to ArgList.
4759 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
4760
4761 if (R)
4762 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004763}
4764
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004765// In N32/64, an aligned double precision floating point field is passed in
4766// a register.
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004767llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
Akira Hatanakac359f202012-07-03 19:24:06 +00004768 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
4769
4770 if (IsO32) {
4771 CoerceToIntArgs(TySize, ArgList);
4772 return llvm::StructType::get(getVMContext(), ArgList);
4773 }
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004774
Akira Hatanaka2afd23d2012-01-12 00:52:17 +00004775 if (Ty->isComplexType())
4776 return CGT.ConvertType(Ty);
Akira Hatanaka6d1080f2012-01-10 23:12:19 +00004777
Akira Hatanakaa34e9212012-02-09 19:54:16 +00004778 const RecordType *RT = Ty->getAs<RecordType>();
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004779
Akira Hatanakac359f202012-07-03 19:24:06 +00004780 // Unions/vectors are passed in integer registers.
4781 if (!RT || !RT->isStructureOrClassType()) {
4782 CoerceToIntArgs(TySize, ArgList);
4783 return llvm::StructType::get(getVMContext(), ArgList);
4784 }
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004785
4786 const RecordDecl *RD = RT->getDecl();
4787 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004788 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004789
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004790 uint64_t LastOffset = 0;
4791 unsigned idx = 0;
4792 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
4793
Akira Hatanakaa34e9212012-02-09 19:54:16 +00004794 // Iterate over fields in the struct/class and check if there are any aligned
4795 // double fields.
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004796 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
4797 i != e; ++i, ++idx) {
David Blaikie262bc182012-04-30 02:36:29 +00004798 const QualType Ty = i->getType();
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004799 const BuiltinType *BT = Ty->getAs<BuiltinType>();
4800
4801 if (!BT || BT->getKind() != BuiltinType::Double)
4802 continue;
4803
4804 uint64_t Offset = Layout.getFieldOffset(idx);
4805 if (Offset % 64) // Ignore doubles that are not aligned.
4806 continue;
4807
4808 // Add ((Offset - LastOffset) / 64) args of type i64.
4809 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
4810 ArgList.push_back(I64);
4811
4812 // Add double type.
4813 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
4814 LastOffset = Offset + 64;
4815 }
4816
Akira Hatanakac359f202012-07-03 19:24:06 +00004817 CoerceToIntArgs(TySize - LastOffset, IntArgList);
4818 ArgList.append(IntArgList.begin(), IntArgList.end());
Akira Hatanakad5a257f2011-11-02 23:54:49 +00004819
4820 return llvm::StructType::get(getVMContext(), ArgList);
4821}
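// Illustrative N32/64 coercions produced by HandleAggregates (sketch,
// assuming 32-bit int and 8-byte-aligned 64-bit double):
//
//   struct { double d; double e; }  -> { double, double }
//   struct { int i; double d; }     -> { i64, double }  (i64 covers i + padding)
//   struct { double d; int i; }     -> { double, i64 }  (trailing i64 from
//                                                        CoerceToIntArgs)
//   struct { float f; float g; }    -> { i64 }          (no aligned doubles)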
4822
Akira Hatanakaa33fd392012-01-09 19:31:25 +00004823llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004824 assert((Offset % MinABIStackAlignInBytes) == 0);
Akira Hatanakaa33fd392012-01-09 19:31:25 +00004825
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004826 if ((Align - 1) & Offset)
4827 return llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
4828
4829 return 0;
Akira Hatanakaa33fd392012-01-09 19:31:25 +00004830}
Akira Hatanaka9659d592012-01-10 22:44:52 +00004831
Akira Hatanakaf0cc2082012-01-07 00:25:33 +00004832ABIArgInfo
4833MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
Akira Hatanakaa33fd392012-01-09 19:31:25 +00004834 uint64_t OrigOffset = Offset;
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004835 uint64_t TySize = getContext().getTypeSize(Ty);
Akira Hatanakaa33fd392012-01-09 19:31:25 +00004836 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004837
Akira Hatanakac359f202012-07-03 19:24:06 +00004838 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
4839 (uint64_t)StackAlignInBytes);
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004840 Offset = llvm::RoundUpToAlignment(Offset, Align);
4841 Offset += llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
Akira Hatanakaa33fd392012-01-09 19:31:25 +00004842
Akira Hatanakac359f202012-07-03 19:24:06 +00004843 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
Akira Hatanaka619e8872011-06-02 00:09:17 +00004844 // Ignore empty aggregates.
Akira Hatanakaf0cc2082012-01-07 00:25:33 +00004845 if (TySize == 0)
Akira Hatanaka619e8872011-06-02 00:09:17 +00004846 return ABIArgInfo::getIgnore();
4847
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00004848 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT)) {
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004849 Offset = OrigOffset + MinABIStackAlignInBytes;
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00004850 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Akira Hatanakaf0cc2082012-01-07 00:25:33 +00004851 }
Akira Hatanaka511949b2011-08-01 18:09:58 +00004852
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004853 // If we have reached here, aggregates are passed directly by coercing to
4854 // another structure type. Padding is inserted if the offset of the
4855 // aggregate is unaligned.
4856 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
4857 getPaddingType(Align, OrigOffset));
Akira Hatanaka619e8872011-06-02 00:09:17 +00004858 }
4859
4860 // Treat an enum type as its underlying type.
4861 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4862 Ty = EnumTy->getDecl()->getIntegerType();
4863
Akira Hatanakaa33fd392012-01-09 19:31:25 +00004864 if (Ty->isPromotableIntegerType())
4865 return ABIArgInfo::getExtend();
4866
Akira Hatanaka4055cfc2013-01-24 21:47:33 +00004867 return ABIArgInfo::getDirect(0, 0,
4868 IsO32 ? 0 : getPaddingType(Align, OrigOffset));
Akira Hatanaka619e8872011-06-02 00:09:17 +00004869}
4870
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004871llvm::Type*
4872MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
Akira Hatanakada54ff32012-02-09 18:49:26 +00004873 const RecordType *RT = RetTy->getAs<RecordType>();
Akira Hatanakac359f202012-07-03 19:24:06 +00004874 SmallVector<llvm::Type*, 8> RTList;
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004875
Akira Hatanakada54ff32012-02-09 18:49:26 +00004876 if (RT && RT->isStructureOrClassType()) {
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004877 const RecordDecl *RD = RT->getDecl();
Akira Hatanakada54ff32012-02-09 18:49:26 +00004878 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
4879 unsigned FieldCnt = Layout.getFieldCount();
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004880
Akira Hatanakada54ff32012-02-09 18:49:26 +00004881 // N32/64 returns struct/classes in floating point registers if the
4882 // following conditions are met:
4883 // 1. The size of the struct/class is no larger than 128-bit.
4884 // 2. The struct/class has one or two fields all of which are floating
4885 // point types.
4886 // 3. The offset of the first field is zero (this follows what gcc does).
4887 //
4888 // Any other composite results are returned in integer registers.
4889 //
4890 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
4891 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
4892 for (; b != e; ++b) {
David Blaikie262bc182012-04-30 02:36:29 +00004893 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004894
Akira Hatanakada54ff32012-02-09 18:49:26 +00004895 if (!BT || !BT->isFloatingPoint())
4896 break;
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004897
David Blaikie262bc182012-04-30 02:36:29 +00004898 RTList.push_back(CGT.ConvertType(b->getType()));
Akira Hatanakada54ff32012-02-09 18:49:26 +00004899 }
4900
4901 if (b == e)
4902 return llvm::StructType::get(getVMContext(), RTList,
4903 RD->hasAttr<PackedAttr>());
4904
4905 RTList.clear();
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004906 }
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004907 }
4908
Akira Hatanakac359f202012-07-03 19:24:06 +00004909 CoerceToIntArgs(Size, RTList);
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004910 return llvm::StructType::get(getVMContext(), RTList);
4911}
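// Illustrative N32/64 return coercions under rules 1-3 above (sketch):
//
//   struct { float f; }            -> { float }           (one FP field)
//   struct { double d; float f; }  -> { double, float }   (two FP fields)
//   struct { float f; int i; }     -> { i64 }             (mixed fields fall
//                                                          back to integer regs)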
4912
Akira Hatanaka619e8872011-06-02 00:09:17 +00004913ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
Akira Hatanakaa8536c02012-01-23 23:18:57 +00004914 uint64_t Size = getContext().getTypeSize(RetTy);
4915
4916 if (RetTy->isVoidType() || Size == 0)
Akira Hatanaka619e8872011-06-02 00:09:17 +00004917 return ABIArgInfo::getIgnore();
4918
Akira Hatanaka8aeb1472012-05-11 21:01:17 +00004919 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00004920 if (isRecordReturnIndirect(RetTy, CGT))
4921 return ABIArgInfo::getIndirect(0);
4922
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004923 if (Size <= 128) {
4924 if (RetTy->isAnyComplexType())
4925 return ABIArgInfo::getDirect();
4926
Akira Hatanakac359f202012-07-03 19:24:06 +00004927 // O32 returns integer vectors in registers.
4928 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
4929 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
4930
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00004931 if (!IsO32)
Akira Hatanakac7ecc2e2012-01-04 03:34:42 +00004932 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
4933 }
Akira Hatanaka619e8872011-06-02 00:09:17 +00004934
4935 return ABIArgInfo::getIndirect(0);
4936 }
4937
4938 // Treat an enum type as its underlying type.
4939 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4940 RetTy = EnumTy->getDecl()->getIntegerType();
4941
4942 return (RetTy->isPromotableIntegerType() ?
4943 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4944}
4945
4946void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
Akira Hatanakacc662542012-01-12 01:10:09 +00004947 ABIArgInfo &RetInfo = FI.getReturnInfo();
4948 RetInfo = classifyReturnType(FI.getReturnType());
4949
4950 // Check if a pointer to an aggregate is passed as a hidden argument.
Akira Hatanaka91338cf2012-05-11 21:56:58 +00004951 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
Akira Hatanakacc662542012-01-12 01:10:09 +00004952
Akira Hatanaka619e8872011-06-02 00:09:17 +00004953 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4954 it != ie; ++it)
Akira Hatanakaf0cc2082012-01-07 00:25:33 +00004955 it->info = classifyArgumentType(it->type, Offset);
Akira Hatanaka619e8872011-06-02 00:09:17 +00004956}
4957
4958llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4959 CodeGenFunction &CGF) const {
Chris Lattner8b418682012-02-07 00:39:47 +00004960 llvm::Type *BP = CGF.Int8PtrTy;
4961 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Akira Hatanakac35e69d2011-08-01 20:48:01 +00004962
4963 CGBuilderTy &Builder = CGF.Builder;
4964 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
4965 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
Akira Hatanaka8f675e42012-01-23 23:59:52 +00004966 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
Akira Hatanakac35e69d2011-08-01 20:48:01 +00004967 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4968 llvm::Value *AddrTyped;
John McCall64aa4b32013-04-16 22:48:15 +00004969 unsigned PtrWidth = getTarget().getPointerWidth(0);
Akira Hatanaka8f675e42012-01-23 23:59:52 +00004970 llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
Akira Hatanakac35e69d2011-08-01 20:48:01 +00004971
4972 if (TypeAlign > MinABIStackAlignInBytes) {
Akira Hatanaka8f675e42012-01-23 23:59:52 +00004973 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
4974 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
4975 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
4976 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
Akira Hatanakac35e69d2011-08-01 20:48:01 +00004977 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
4978 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
4979 }
4980 else
4981 AddrTyped = Builder.CreateBitCast(Addr, PTy);
4982
4983 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
Akira Hatanaka8f675e42012-01-23 23:59:52 +00004984 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
Akira Hatanakac35e69d2011-08-01 20:48:01 +00004985 uint64_t Offset =
4986 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
4987 llvm::Value *NextAddr =
Akira Hatanaka8f675e42012-01-23 23:59:52 +00004988 Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
Akira Hatanakac35e69d2011-08-01 20:48:01 +00004989 "ap.next");
4990 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
4991
4992 return AddrTyped;
Akira Hatanaka619e8872011-06-02 00:09:17 +00004993}
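// Alignment sketch for the va_arg code above: when TypeAlign exceeds
// MinABIStackAlignInBytes, the current pointer is bumped to
//   (ap.cur + TypeAlign - 1) & -TypeAlign
// e.g. ap.cur == 0x1004 with TypeAlign == 8 yields 0x1008; ap.next then
// advances by the type size rounded up to TypeAlign.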
4994
John McCallaeeb7012010-05-27 06:19:26 +00004995bool
4996MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4997 llvm::Value *Address) const {
4998 // This information comes from gcc's implementation, which seems to
4999  // be as canonical as it gets.
5000
John McCallaeeb7012010-05-27 06:19:26 +00005001 // Everything on MIPS is 4 bytes. Double-precision FP registers
5002 // are aliased to pairs of single-precision FP registers.
Chris Lattner8b418682012-02-07 00:39:47 +00005003 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
John McCallaeeb7012010-05-27 06:19:26 +00005004
5005 // 0-31 are the general purpose registers, $0 - $31.
5006 // 32-63 are the floating-point registers, $f0 - $f31.
5007 // 64 and 65 are the multiply/divide registers, $hi and $lo.
5008 // 66 is the (notional, I think) register for signal-handler return.
Chris Lattner8b418682012-02-07 00:39:47 +00005009 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
John McCallaeeb7012010-05-27 06:19:26 +00005010
5011 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5012 // They are one bit wide and ignored here.
5013
5014 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5015 // (coprocessor 1 is the FP unit)
5016 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5017 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5018 // 176-181 are the DSP accumulator registers.
Chris Lattner8b418682012-02-07 00:39:47 +00005019 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
John McCallaeeb7012010-05-27 06:19:26 +00005020 return false;
5021}
5022
Peter Collingbourne2f7aa992011-10-13 16:24:41 +00005023//===----------------------------------------------------------------------===//
5024// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5025// Currently subclassed only to implement custom OpenCL C function attribute
5026// handling.
5027//===----------------------------------------------------------------------===//
5028
5029namespace {
5030
5031class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5032public:
5033 TCETargetCodeGenInfo(CodeGenTypes &CGT)
5034 : DefaultTargetCodeGenInfo(CGT) {}
5035
5036 virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5037 CodeGen::CodeGenModule &M) const;
5038};
5039
5040void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5041 llvm::GlobalValue *GV,
5042 CodeGen::CodeGenModule &M) const {
5043 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5044 if (!FD) return;
5045
5046 llvm::Function *F = cast<llvm::Function>(GV);
5047
David Blaikie4e4d0842012-03-11 07:00:24 +00005048 if (M.getLangOpts().OpenCL) {
Peter Collingbourne2f7aa992011-10-13 16:24:41 +00005049 if (FD->hasAttr<OpenCLKernelAttr>()) {
5050 // OpenCL C Kernel functions are not subject to inlining
Bill Wendling72390b32012-12-20 19:27:06 +00005051 F->addFnAttr(llvm::Attribute::NoInline);
Peter Collingbourne2f7aa992011-10-13 16:24:41 +00005052
5053 if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
5054
5055 // Convert the reqd_work_group_size() attributes to metadata.
5056 llvm::LLVMContext &Context = F->getContext();
5057 llvm::NamedMDNode *OpenCLMetadata =
5058 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
5059
5060 SmallVector<llvm::Value*, 5> Operands;
5061 Operands.push_back(F);
5062
Chris Lattner8b418682012-02-07 00:39:47 +00005063 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5064 llvm::APInt(32,
5065 FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
5066 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5067 llvm::APInt(32,
Peter Collingbourne2f7aa992011-10-13 16:24:41 +00005068 FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
Chris Lattner8b418682012-02-07 00:39:47 +00005069 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5070 llvm::APInt(32,
Peter Collingbourne2f7aa992011-10-13 16:24:41 +00005071 FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
5072
5073 // Add a boolean constant operand for "required" (true) or "hint" (false)
5074 // for implementing the work_group_size_hint attr later. Currently
5075 // always true as the hint is not yet implemented.
Chris Lattner8b418682012-02-07 00:39:47 +00005076 Operands.push_back(llvm::ConstantInt::getTrue(Context));
Peter Collingbourne2f7aa992011-10-13 16:24:41 +00005077 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
5078 }
5079 }
5080 }
5081}
5082
5083}
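// Illustrative result of the attribute handling above (sketch, assuming
// standard OpenCL C spellings):
//
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1)))
//   void k(__global int *p) { ... }
//
// k is marked noinline and "opencl.kernel_wg_size_info" gains the operand
//   {k, i32 8, i32 4, i32 1, i1 true}
// where the trailing i1 true marks the size as required rather than a hint.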
John McCallaeeb7012010-05-27 06:19:26 +00005084
Tony Linthicum96319392011-12-12 21:14:55 +00005085//===----------------------------------------------------------------------===//
5086// Hexagon ABI Implementation
5087//===----------------------------------------------------------------------===//
5088
5089namespace {
5090
5091class HexagonABIInfo : public ABIInfo {
5092
5093
5094public:
5095 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5096
5097private:
5098
5099 ABIArgInfo classifyReturnType(QualType RetTy) const;
5100 ABIArgInfo classifyArgumentType(QualType RetTy) const;
5101
5102 virtual void computeInfo(CGFunctionInfo &FI) const;
5103
5104 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5105 CodeGenFunction &CGF) const;
5106};
5107
5108class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
5109public:
5110 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
5111 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
5112
5113 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
5114 return 29;
5115 }
5116};
5117
5118}
5119
5120void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
5121 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5122 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
5123 it != ie; ++it)
5124 it->info = classifyArgumentType(it->type);
5125}
5126
5127ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
5128 if (!isAggregateTypeForABI(Ty)) {
5129 // Treat an enum type as its underlying type.
5130 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5131 Ty = EnumTy->getDecl()->getIntegerType();
5132
5133 return (Ty->isPromotableIntegerType() ?
5134 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5135 }
5136
5137 // Ignore empty records.
5138 if (isEmptyRecord(getContext(), Ty, true))
5139 return ABIArgInfo::getIgnore();
5140
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00005141 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT))
5142 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Tony Linthicum96319392011-12-12 21:14:55 +00005143
5144 uint64_t Size = getContext().getTypeSize(Ty);
5145 if (Size > 64)
5146 return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5147 // Pass in the smallest viable integer type.
5148 else if (Size > 32)
5149 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5150 else if (Size > 16)
5151 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5152 else if (Size > 8)
5153 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5154 else
5155 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5156}
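// Size buckets used above (illustrative):
//   <= 8 bits  -> i8        <= 16 bits -> i16
//   <= 32 bits -> i32       <= 64 bits -> i64
//   >  64 bits -> indirect, byval
// (Empty records are ignored, and records needing special C++ argument
// handling were already sent indirect above.)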
5157
5158ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
5159 if (RetTy->isVoidType())
5160 return ABIArgInfo::getIgnore();
5161
5162 // Large vector types should be returned via memory.
5163 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
5164 return ABIArgInfo::getIndirect(0);
5165
5166 if (!isAggregateTypeForABI(RetTy)) {
5167 // Treat an enum type as its underlying type.
5168 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5169 RetTy = EnumTy->getDecl()->getIntegerType();
5170
5171 return (RetTy->isPromotableIntegerType() ?
5172 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5173 }
5174
5175 // Structures with either a non-trivial destructor or a non-trivial
5176 // copy constructor are always indirect.
Timur Iskhodzhanoved23bdf2013-04-17 12:54:10 +00005177 if (isRecordReturnIndirect(RetTy, CGT))
Tony Linthicum96319392011-12-12 21:14:55 +00005178 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5179
5180 if (isEmptyRecord(getContext(), RetTy, true))
5181 return ABIArgInfo::getIgnore();
5182
5183 // Aggregates <= 8 bytes are returned in r0; other aggregates
5184 // are returned indirectly.
5185 uint64_t Size = getContext().getTypeSize(RetTy);
5186 if (Size <= 64) {
5187 // Return in the smallest viable integer type.
5188 if (Size <= 8)
5189 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5190 if (Size <= 16)
5191 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5192 if (Size <= 32)
5193 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5194 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5195 }
5196
5197 return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5198}
5199
5200llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
Chris Lattner8b418682012-02-07 00:39:47 +00005201 CodeGenFunction &CGF) const {
Tony Linthicum96319392011-12-12 21:14:55 +00005202 // FIXME: Need to handle alignment
Chris Lattner8b418682012-02-07 00:39:47 +00005203 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Tony Linthicum96319392011-12-12 21:14:55 +00005204
5205 CGBuilderTy &Builder = CGF.Builder;
5206 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
5207 "ap");
5208 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5209 llvm::Type *PTy =
5210 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5211 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
5212
5213 uint64_t Offset =
5214 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
5215 llvm::Value *NextAddr =
5216 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
5217 "ap.next");
5218 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5219
5220 return AddrTyped;
5221}
5222
5223
Jakob Stoklund Olesen107196c2013-05-27 21:48:25 +00005224//===----------------------------------------------------------------------===//
5225// SPARC v9 ABI Implementation.
5226// Based on the SPARC Compliance Definition version 2.4.1.
5227//
5228// Function arguments are mapped to a nominal "parameter array" and promoted to
5229// registers depending on their type. Each argument occupies 8 or 16 bytes in
5230// the array; structs larger than 16 bytes are passed indirectly.
5231//
5232// One case requires special care:
5233//
5234// struct mixed {
5235// int i;
5236// float f;
5237// };
5238//
5239// When a struct mixed is passed by value, it only occupies 8 bytes in the
5240// parameter array, but the int is passed in an integer register, and the float
5241// is passed in a floating point register. This is represented as two arguments
5242// with the LLVM IR inreg attribute:
5243//
5244// declare void f(i32 inreg %i, float inreg %f)
5245//
5246// The code generator will only allocate 4 bytes from the parameter array for
5247// the inreg arguments. All other arguments are allocated a multiple of 8
5248// bytes.
5249//
5250namespace {
5251class SparcV9ABIInfo : public ABIInfo {
5252public:
5253 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5254
5255private:
5256 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
5257 virtual void computeInfo(CGFunctionInfo &FI) const;
5258 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5259 CodeGenFunction &CGF) const;
Jakob Stoklund Olesenfc782fb2013-05-28 04:57:37 +00005260
5261 // Coercion type builder for structs passed in registers. The coercion type
5262 // serves two purposes:
5263 //
5264 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
5265 // in registers.
5266 // 2. Expose aligned floating point elements as first-level elements, so the
5267 // code generator knows to pass them in floating point registers.
5268 //
5269 // We also compute the InReg flag which indicates that the struct contains
5270 // aligned 32-bit floats.
5271 //
5272 struct CoerceBuilder {
5273 llvm::LLVMContext &Context;
5274 const llvm::DataLayout &DL;
5275 SmallVector<llvm::Type*, 8> Elems;
5276 uint64_t Size;
5277 bool InReg;
5278
5279 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
5280 : Context(c), DL(dl), Size(0), InReg(false) {}
5281
5282 // Pad Elems with integers until Size is ToSize.
5283 void pad(uint64_t ToSize) {
5284 assert(ToSize >= Size && "Cannot remove elements");
5285 if (ToSize == Size)
5286 return;
5287
5288 // Finish the current 64-bit word.
5289 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
5290 if (Aligned > Size && Aligned <= ToSize) {
5291 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
5292 Size = Aligned;
5293 }
5294
5295 // Add whole 64-bit words.
5296 while (Size + 64 <= ToSize) {
5297 Elems.push_back(llvm::Type::getInt64Ty(Context));
5298 Size += 64;
5299 }
5300
5301 // Final in-word padding.
5302 if (Size < ToSize) {
5303 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
5304 Size = ToSize;
5305 }
5306 }
5307
5308 // Add a floating point element at Offset.
5309 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
5310 // Unaligned floats are treated as integers.
5311 if (Offset % Bits)
5312 return;
5313 // The InReg flag is only required if there are any floats < 64 bits.
5314 if (Bits < 64)
5315 InReg = true;
5316 pad(Offset);
5317 Elems.push_back(Ty);
5318 Size = Offset + Bits;
5319 }
5320
5321 // Add a struct type to the coercion type, starting at Offset (in bits).
5322 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
5323 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
5324 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
5325 llvm::Type *ElemTy = StrTy->getElementType(i);
5326 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
5327 switch (ElemTy->getTypeID()) {
5328 case llvm::Type::StructTyID:
5329 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
5330 break;
5331 case llvm::Type::FloatTyID:
5332 addFloat(ElemOffset, ElemTy, 32);
5333 break;
5334 case llvm::Type::DoubleTyID:
5335 addFloat(ElemOffset, ElemTy, 64);
5336 break;
5337 case llvm::Type::FP128TyID:
5338 addFloat(ElemOffset, ElemTy, 128);
5339 break;
5340 case llvm::Type::PointerTyID:
5341 if (ElemOffset % 64 == 0) {
5342 pad(ElemOffset);
5343 Elems.push_back(ElemTy);
5344 Size += 64;
5345 }
5346 break;
5347 default:
5348 break;
5349 }
5350 }
5351 }
5352
5353 // Check if Ty is a usable substitute for the coercion type.
5354 bool isUsableType(llvm::StructType *Ty) const {
5355 if (Ty->getNumElements() != Elems.size())
5356 return false;
5357 for (unsigned i = 0, e = Elems.size(); i != e; ++i)
5358 if (Elems[i] != Ty->getElementType(i))
5359 return false;
5360 return true;
5361 }
5362
5363 // Get the coercion type as a literal struct type.
5364 llvm::Type *getType() const {
5365 if (Elems.size() == 1)
5366 return Elems.front();
5367 else
5368 return llvm::StructType::get(Context, Elems);
5369 }
5370 };
Jakob Stoklund Olesen107196c2013-05-27 21:48:25 +00005371};
5372} // end anonymous namespace
5373
5374ABIArgInfo
5375SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
5376 if (Ty->isVoidType())
5377 return ABIArgInfo::getIgnore();
5378
5379 uint64_t Size = getContext().getTypeSize(Ty);
5380
5381 // Anything too big to fit in registers is passed with an explicit indirect
5382 // pointer / sret pointer.
5383 if (Size > SizeLimit)
5384 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5385
5386 // Treat an enum type as its underlying type.
5387 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5388 Ty = EnumTy->getDecl()->getIntegerType();
5389
5390 // Integer types smaller than a register are extended.
5391 if (Size < 64 && Ty->isIntegerType())
5392 return ABIArgInfo::getExtend();
5393
5394 // Other non-aggregates go in registers.
5395 if (!isAggregateTypeForABI(Ty))
5396 return ABIArgInfo::getDirect();
5397
5398 // This is a small aggregate type that should be passed in registers.
Jakob Stoklund Olesenfc782fb2013-05-28 04:57:37 +00005399 // Build a coercion type from the LLVM struct type.
5400 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
5401 if (!StrTy)
5402 return ABIArgInfo::getDirect();
5403
5404 CoerceBuilder CB(getVMContext(), getDataLayout());
5405 CB.addStruct(0, StrTy);
5406 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
5407
5408 // Try to use the original type for coercion.
5409 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
5410
5411 if (CB.InReg)
5412 return ABIArgInfo::getDirectInReg(CoerceTy);
5413 else
5414 return ABIArgInfo::getDirect(CoerceTy);
Jakob Stoklund Olesen107196c2013-05-27 21:48:25 +00005415}
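// Illustrative CoerceBuilder results (sketch, assuming 64-bit SPARC V9
// layouts):
//
//   struct { int i; float f; }   -> getDirectInReg({ i32, float })
//                                   (the 32-bit float sets InReg)
//   struct { double d; int i; }  -> getDirect({ double, i64 })
//                                   (the int and tail padding become i64)
//   struct { char c[20]; }       -> indirect when passed as an argument
//                                   (over the 16-byte limit)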
5416
5417llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5418 CodeGenFunction &CGF) const {
Jakob Stoklund Olesena4b56d32013-06-05 03:00:18 +00005419 ABIArgInfo AI = classifyType(Ty, 16 * 8);
5420 llvm::Type *ArgTy = CGT.ConvertType(Ty);
5421 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
5422 AI.setCoerceToType(ArgTy);
5423
5424 llvm::Type *BPP = CGF.Int8PtrPtrTy;
5425 CGBuilderTy &Builder = CGF.Builder;
5426 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
5427 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5428 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
5429 llvm::Value *ArgAddr;
5430 unsigned Stride;
5431
5432 switch (AI.getKind()) {
5433 case ABIArgInfo::Expand:
5434 llvm_unreachable("Unsupported ABI kind for va_arg");
5435
5436 case ABIArgInfo::Extend:
5437 Stride = 8;
5438 ArgAddr = Builder
5439 .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
5440 "extend");
5441 break;
5442
5443 case ABIArgInfo::Direct:
5444 Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
5445 ArgAddr = Addr;
5446 break;
5447
5448 case ABIArgInfo::Indirect:
5449 Stride = 8;
5450 ArgAddr = Builder.CreateBitCast(Addr,
5451 llvm::PointerType::getUnqual(ArgPtrTy),
5452 "indirect");
5453 ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
5454 break;
5455
5456 case ABIArgInfo::Ignore:
5457 return llvm::UndefValue::get(ArgPtrTy);
5458 }
5459
5460 // Update VAList.
5461 Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
5462 Builder.CreateStore(Addr, VAListAddrAsBPP);
5463
5464 return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
Jakob Stoklund Olesen107196c2013-05-27 21:48:25 +00005465}
5466
5467void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
5468 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
5469 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
5470 it != ie; ++it)
5471 it->info = classifyType(it->type, 16 * 8);
5472}
5473
5474namespace {
5475class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
5476public:
5477 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
5478 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
5479};
5480} // end anonymous namespace
5481
5482
Chris Lattnerea044322010-07-29 02:01:43 +00005483const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00005484 if (TheTargetCodeGenInfo)
5485 return *TheTargetCodeGenInfo;
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00005486
John McCall64aa4b32013-04-16 22:48:15 +00005487 const llvm::Triple &Triple = getTarget().getTriple();
Daniel Dunbar1752ee42009-08-24 09:10:05 +00005488 switch (Triple.getArch()) {
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00005489 default:
Chris Lattnerea044322010-07-29 02:01:43 +00005490 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00005491
Derek Schuff9ed63f82012-09-06 17:37:28 +00005492 case llvm::Triple::le32:
5493 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
John McCallaeeb7012010-05-27 06:19:26 +00005494 case llvm::Triple::mips:
5495 case llvm::Triple::mipsel:
Akira Hatanakac0e3b662011-11-02 23:14:57 +00005496 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
John McCallaeeb7012010-05-27 06:19:26 +00005497
Akira Hatanaka8c6dfbe2011-09-20 18:30:57 +00005498 case llvm::Triple::mips64:
5499 case llvm::Triple::mips64el:
Akira Hatanakac0e3b662011-11-02 23:14:57 +00005500 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
Akira Hatanaka8c6dfbe2011-09-20 18:30:57 +00005501
Tim Northoverc264e162013-01-31 12:13:10 +00005502 case llvm::Triple::aarch64:
5503 return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));
5504
Daniel Dunbar34d91fd2009-09-12 00:59:49 +00005505 case llvm::Triple::arm:
5506 case llvm::Triple::thumb:
Sandeep Patel34c1af82011-04-05 00:23:47 +00005507 {
5508 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
John McCall64aa4b32013-04-16 22:48:15 +00005509 if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
Sandeep Patel34c1af82011-04-05 00:23:47 +00005510 Kind = ARMABIInfo::APCS;
David Tweedb16abb12012-10-25 13:33:01 +00005511 else if (CodeGenOpts.FloatABI == "hard" ||
John McCall64aa4b32013-04-16 22:48:15 +00005512 (CodeGenOpts.FloatABI != "soft" &&
5513 Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
Sandeep Patel34c1af82011-04-05 00:23:47 +00005514 Kind = ARMABIInfo::AAPCS_VFP;
5515
Derek Schuff263366f2012-10-16 22:30:41 +00005516 switch (Triple.getOS()) {
Eli Bendersky441d9f72012-12-04 18:38:10 +00005517 case llvm::Triple::NaCl:
Derek Schuff263366f2012-10-16 22:30:41 +00005518 return *(TheTargetCodeGenInfo =
5519 new NaClARMTargetCodeGenInfo(Types, Kind));
5520 default:
5521 return *(TheTargetCodeGenInfo =
5522 new ARMTargetCodeGenInfo(Types, Kind));
5523 }
Sandeep Patel34c1af82011-04-05 00:23:47 +00005524 }
Daniel Dunbar34d91fd2009-09-12 00:59:49 +00005525
John McCallec853ba2010-03-11 00:10:12 +00005526 case llvm::Triple::ppc:
Chris Lattnerea044322010-07-29 02:01:43 +00005527 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
Roman Divacky0fbc4b92012-05-09 18:22:46 +00005528 case llvm::Triple::ppc64:
Bill Schmidt2fc107f2012-10-03 19:18:57 +00005529 if (Triple.isOSBinFormatELF())
5530 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
5531 else
5532 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
John McCallec853ba2010-03-11 00:10:12 +00005533
Peter Collingbourneedb66f32012-05-20 23:28:41 +00005534 case llvm::Triple::nvptx:
5535 case llvm::Triple::nvptx64:
Justin Holewinski2c585b92012-05-24 17:43:12 +00005536 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
Justin Holewinski0259c3a2011-04-22 11:10:38 +00005537
Wesley Peck276fdf42010-12-19 19:57:51 +00005538 case llvm::Triple::mblaze:
5539 return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
5540
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00005541 case llvm::Triple::msp430:
Chris Lattnerea044322010-07-29 02:01:43 +00005542 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
Daniel Dunbar34d91fd2009-09-12 00:59:49 +00005543
Ulrich Weigandb8409212013-05-06 16:26:41 +00005544 case llvm::Triple::systemz:
5545 return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
5546
Peter Collingbourne2f7aa992011-10-13 16:24:41 +00005547 case llvm::Triple::tce:
5548 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
5549
Eli Friedmanc3e0fb42011-07-08 23:31:17 +00005550 case llvm::Triple::x86: {
John McCallb8b52972013-06-18 02:46:29 +00005551 bool IsDarwinVectorABI = Triple.isOSDarwin();
5552 bool IsSmallStructInRegABI =
5553 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
5554 bool IsWin32FloatStructABI = (Triple.getOS() == llvm::Triple::Win32);
Daniel Dunbardb57a4c2011-04-19 21:43:27 +00005555
John McCallb8b52972013-06-18 02:46:29 +00005556 if (Triple.getOS() == llvm::Triple::Win32) {
Eli Friedman55fc7e22012-01-25 22:46:34 +00005557 return *(TheTargetCodeGenInfo =
Reid Kleckner3190ca92013-05-08 13:44:39 +00005558 new WinX86_32TargetCodeGenInfo(Types,
John McCallb8b52972013-06-18 02:46:29 +00005559 IsDarwinVectorABI, IsSmallStructInRegABI,
5560 IsWin32FloatStructABI,
Reid Kleckner3190ca92013-05-08 13:44:39 +00005561 CodeGenOpts.NumRegisterParameters));
John McCallb8b52972013-06-18 02:46:29 +00005562 } else {
Anton Korobeynikov82d0a412010-01-10 12:58:08 +00005563 return *(TheTargetCodeGenInfo =
John McCallb8b52972013-06-18 02:46:29 +00005564 new X86_32TargetCodeGenInfo(Types,
5565 IsDarwinVectorABI, IsSmallStructInRegABI,
5566 IsWin32FloatStructABI,
Rafael Espindolab48280b2012-07-31 02:44:24 +00005567 CodeGenOpts.NumRegisterParameters));
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00005568 }
Eli Friedmanc3e0fb42011-07-08 23:31:17 +00005569 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00005570
Eli Friedmanee1ad992011-12-02 00:11:43 +00005571 case llvm::Triple::x86_64: {
John McCall64aa4b32013-04-16 22:48:15 +00005572 bool HasAVX = strcmp(getTarget().getABI(), "avx") == 0;
Eli Friedmanee1ad992011-12-02 00:11:43 +00005573
Chris Lattnerf13721d2010-08-31 16:44:54 +00005574 switch (Triple.getOS()) {
5575 case llvm::Triple::Win32:
NAKAMURA Takumi0aa20572011-02-17 08:51:38 +00005576 case llvm::Triple::MinGW32:
Chris Lattnerf13721d2010-08-31 16:44:54 +00005577 case llvm::Triple::Cygwin:
5578 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
Eli Bendersky441d9f72012-12-04 18:38:10 +00005579 case llvm::Triple::NaCl:
John McCall64aa4b32013-04-16 22:48:15 +00005580 return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
5581 HasAVX));
Chris Lattnerf13721d2010-08-31 16:44:54 +00005582 default:
Eli Friedmanee1ad992011-12-02 00:11:43 +00005583 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
5584 HasAVX));
Chris Lattnerf13721d2010-08-31 16:44:54 +00005585 }
Daniel Dunbar2c0843f2009-08-24 08:52:16 +00005586 }
Tony Linthicum96319392011-12-12 21:14:55 +00005587 case llvm::Triple::hexagon:
5588 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
Jakob Stoklund Olesen107196c2013-05-27 21:48:25 +00005589 case llvm::Triple::sparcv9:
5590 return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
Eli Friedmanee1ad992011-12-02 00:11:43 +00005591 }
Anton Korobeynikovc4a59eb2009-06-05 22:08:42 +00005592}