//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"

#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

static bool isRecordReturnIndirect(const RecordType *RT,
                                   CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return false;
  return CXXABI.isReturnTypeIndirect(RD);
}


static bool isRecordReturnIndirect(QualType T, CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  return isRecordReturnIndirect(RT, CXXABI);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64   FreeBSD, Linux, Darwin
  //   x86-32   FreeBSD, Linux, Darwin
  //   PowerPC  Linux, Darwin
  //   ARM      Darwin (*not* EABI)
  //   AArch64  Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return 0;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return 0;

  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return 0;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return 0;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return 0;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return 0;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return 0;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return 0;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/constructors should not be passed
    // by value.
    if (isRecordReturnIndirect(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI.  Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return 0;
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

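// X86AdjustInlineAsmType - For the MMX register constraints ("y" and "&y"),
// adjust 64-bit vector operand types to the x86_mmx IR type the backend
// expects; all other constraints and types are passed through unchanged.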
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return 0;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned StackOffset;
  bool UseInAlloca;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                  bool IsInstanceMethod) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State,
                                bool IsInstanceMethod) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

};

}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
                                               bool IsInstanceMethod) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context,
                                      IsInstanceMethod);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // For thiscall conventions, structures will never be returned in
  // a register. This is for compatibility with the MSVC ABI.
  if (IsWin32StructABI && IsInstanceMethod && RT->isStructureType())
    return false;

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context, IsInstanceMethod))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, CCState &State,
                                             bool IsInstanceMethod) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      if (isRecordReturnIndirect(RT, getCXXABI()))
        return getIndirectReturnResult(State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext(), IsInstanceMethod)) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

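// Helpers for the x86-32 byval stack-alignment logic below: detect a 128-bit
// SSE vector type, either directly or anywhere inside a record.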
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (isRecordWithSSEVectorType(Context, I.getType()))
        return true;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}

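// classify - Pick the register class (Integer or Float) used to pass Ty on
// x86-32, looking through single-element structs so that a struct wrapping a
// lone float or double is still classified as Float.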
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

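// shouldUseInReg - Return true if Ty should be passed in the integer registers
// tracked by State. Registers are deducted from State.FreeRegs whenever the
// type fits; under fastcall only small integer, pointer, and reference types
// actually use them, and NeedsPadding is set when a leftover register should
// be burned as padding instead.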
bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

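// classifyArgumentType - Decide how one x86-32 argument is passed: aggregates
// end up in registers, expanded, byval, or inalloca depending on their
// contents and on what the C++ ABI requires; vector and scalar types are
// handled afterwards.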
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Check with the C++ ABI first.
      CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
      if (RAA == CGCXXABI::RAA_Indirect) {
        return getIndirectResult(Ty, false, State);
      } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
        // The field index doesn't matter, we'll fix it up later.
        return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
      }

      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : 0;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall, PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  FI.getReturnInfo() =
      classifyReturnType(FI.getReturnType(), State, FI.isInstanceMethod());

  // On win32, use the x86_cdeclmethodcc convention for cdecl methods that use
  // sret. This convention swaps the order of the first two parameters behind
  // the scenes to match MSVC.
  if (IsWin32StructABI && FI.isInstanceMethod() &&
      FI.getCallingConvention() == llvm::CallingConv::C &&
      FI.getReturnInfo().isIndirect())
    FI.setEffectiveCallingConvention(llvm::CallingConv::X86_CDeclMethod);

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

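// addFieldToArgStruct - Append the in-memory representation of Type as the
// next field of the inalloca frame struct, record its field index in Info, and
// advance StackOffset, adding an i8 padding array when the field's size is not
// a multiple of 4 so that the next field starts on a 4-byte boundary.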
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment.  For x86_32, each argument is 4
  // byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    assert(NumBytes);
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}

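// rewriteWithInAlloca - Rebuild the argument layout so that every memory
// argument, plus the hidden sret pointer when it is indirect and not passed in
// a register, becomes a field of a single packed struct allocated with
// inalloca. Ignored and register arguments, including 'this' in ecx for
// thiscall, are left untouched.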
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;

  // Put the sret parameter into the inalloca struct if it's in memory.
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
  if (FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {

    // Leave ignored and inreg arguments alone.
    switch (I->info.getKind()) {
    case ABIArgInfo::Indirect:
      assert(I->info.getIndirectByVal());
      break;
    case ABIArgInfo::Ignore:
      continue;
    case ABIArgInfo::Direct:
    case ABIArgInfo::Extend:
      if (I->info.getInReg())
        continue;
      break;
    default:
      break;
    }

    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}

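// EmitVAArg - Lower va_arg for x86-32: load the current pointer from the
// va_list, realign it when the type requires more than 4-byte stack alignment,
// advance the va_list past the argument (rounded up to that alignment), and
// return the address cast to a pointer to the argument's type.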
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001085llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1086 CodeGenFunction &CGF) const {
Chris Lattnerece04092012-02-07 00:39:47 +00001087 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001088
1089 CGBuilderTy &Builder = CGF.Builder;
1090 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
1091 "ap");
1092 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
Eli Friedman1d7dd3b2011-11-18 02:12:09 +00001093
1094 // Compute if the address needs to be aligned
1095 unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
1096 Align = getTypeStackAlignInBytes(Ty, Align);
1097 Align = std::max(Align, 4U);
1098 if (Align > 4) {
1099 // addr = (addr + align - 1) & -align;
1100 llvm::Value *Offset =
1101 llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
1102 Addr = CGF.Builder.CreateGEP(Addr, Offset);
1103 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
1104 CGF.Int32Ty);
1105 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
1106 Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
1107 Addr->getType(),
1108 "ap.cur.aligned");
1109 }
1110
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001111 llvm::Type *PTy =
Owen Anderson9793f0e2009-07-29 22:16:19 +00001112 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001113 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
1114
1115 uint64_t Offset =
Eli Friedman1d7dd3b2011-11-18 02:12:09 +00001116 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001117 llvm::Value *NextAddr =
Chris Lattner5e016ae2010-06-27 07:15:29 +00001118 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001119 "ap.next");
1120 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
1121
1122 return AddrTyped;
1123}
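//
// Numerical sketch of the pointer bumps above (illustrative only). With
// ap.cur == 0x1004 and an argument type whose stack alignment works out to
// 16 bytes:
//
//   addr = (0x1004 + 16 - 1) & -16              // 0x1010, "ap.cur.aligned"
//   ap   = addr + RoundUpToAlignment(size, 16)  // next slot
//
// For a plain int the alignment clamps to 4, no realignment happens, and the
// pointer simply advances by 4.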
1124
Charles Davis4ea31ab2010-02-13 15:54:06 +00001125void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
1126 llvm::GlobalValue *GV,
1127 CodeGen::CodeGenModule &CGM) const {
1128 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
1129 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1130 // Get the LLVM function.
1131 llvm::Function *Fn = cast<llvm::Function>(GV);
1132
1133 // Now add the 'alignstack' attribute with a value of 16.
Bill Wendlinga514ebc2012-10-15 20:36:26 +00001134 llvm::AttrBuilder B;
Bill Wendlingccf94c92012-10-14 03:28:14 +00001135 B.addStackAlignmentAttr(16);
Bill Wendling9a677922013-01-23 00:21:06 +00001136 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1137 llvm::AttributeSet::get(CGM.getLLVMContext(),
1138 llvm::AttributeSet::FunctionIndex,
1139 B));
Charles Davis4ea31ab2010-02-13 15:54:06 +00001140 }
1141 }
1142}
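//
// For reference (a sketch, not a spec quote): the attribute handled above is
// spelled in source as
//
//   __attribute__((force_align_arg_pointer)) void f(void);
//
// and the net effect here is the LLVM 'alignstack(16)' function attribute,
// which makes the backend realign the stack on entry to f.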
1143
John McCallbeec5a02010-03-06 00:35:14 +00001144bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1145 CodeGen::CodeGenFunction &CGF,
1146 llvm::Value *Address) const {
1147 CodeGen::CGBuilderTy &Builder = CGF.Builder;
John McCallbeec5a02010-03-06 00:35:14 +00001148
Chris Lattnerece04092012-02-07 00:39:47 +00001149 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001150
John McCallbeec5a02010-03-06 00:35:14 +00001151 // 0-7 are the eight integer registers; the order is different
1152 // on Darwin (for EH), but the range is the same.
1153 // 8 is %eip.
John McCall943fae92010-05-27 06:19:26 +00001154 AssignToArrayRange(Builder, Address, Four8, 0, 8);
John McCallbeec5a02010-03-06 00:35:14 +00001155
John McCallc8e01702013-04-16 22:48:15 +00001156 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
John McCallbeec5a02010-03-06 00:35:14 +00001157 // 12-16 are st(0..4). Not sure why we stop at 4.
1158 // These have size 16, which is sizeof(long double) on
1159 // platforms with 8-byte alignment for that type.
Chris Lattnerece04092012-02-07 00:39:47 +00001160 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
John McCall943fae92010-05-27 06:19:26 +00001161 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001162
John McCallbeec5a02010-03-06 00:35:14 +00001163 } else {
1164 // 9 is %eflags, which doesn't get a size on Darwin for some
1165 // reason.
1166 Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
1167
1168 // 11-16 are st(0..5). Not sure why we stop at 5.
1169 // These have size 12, which is sizeof(long double) on
1170 // platforms with 4-byte alignment for that type.
Chris Lattnerece04092012-02-07 00:39:47 +00001171 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
John McCall943fae92010-05-27 06:19:26 +00001172 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1173 }
John McCallbeec5a02010-03-06 00:35:14 +00001174
1175 return false;
1176}
1177
Chris Lattner0cf24192010-06-28 20:05:43 +00001178//===----------------------------------------------------------------------===//
1179// X86-64 ABI Implementation
1180//===----------------------------------------------------------------------===//
1181
1182
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001183namespace {
1184/// X86_64ABIInfo - The X86_64 ABI information.
1185class X86_64ABIInfo : public ABIInfo {
1186 enum Class {
1187 Integer = 0,
1188 SSE,
1189 SSEUp,
1190 X87,
1191 X87Up,
1192 ComplexX87,
1193 NoClass,
1194 Memory
1195 };
1196
1197 /// merge - Implement the X86_64 ABI merging algorithm.
1198 ///
1199 /// Merge an accumulating classification \arg Accum with a field
1200 /// classification \arg Field.
1201 ///
1202 /// \param Accum - The accumulating classification. This should
1203 /// always be either NoClass or the result of a previous merge
1204 /// call. In addition, this should never be Memory (the caller
1205 /// should just return Memory for the aggregate).
Chris Lattnerd776fb12010-06-28 21:43:59 +00001206 static Class merge(Class Accum, Class Field);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001207
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001208 /// postMerge - Implement the X86_64 ABI post merging algorithm.
1209 ///
1210 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
1211 /// final MEMORY or SSE classes when necessary.
1212 ///
1213 /// \param AggregateSize - The size of the current aggregate in
1214 /// the classification process.
1215 ///
1216 /// \param Lo - The classification for the parts of the type
1217 /// residing in the low word of the containing object.
1218 ///
1219 /// \param Hi - The classification for the parts of the type
1220 /// residing in the higher words of the containing object.
1221 ///
1222 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
1223
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001224 /// classify - Determine the x86_64 register classes in which the
1225 /// given type T should be passed.
1226 ///
1227 /// \param Lo - The classification for the parts of the type
1228 /// residing in the low word of the containing object.
1229 ///
1230 /// \param Hi - The classification for the parts of the type
1231 /// residing in the high word of the containing object.
1232 ///
1233 /// \param OffsetBase - The bit offset of this type in the
1234  /// containing object. Some parameters are classified differently
1235 /// depending on whether they straddle an eightbyte boundary.
1236 ///
Eli Friedman96fd2642013-06-12 00:13:45 +00001237 /// \param isNamedArg - Whether the argument in question is a "named"
1238 /// argument, as used in AMD64-ABI 3.5.7.
1239 ///
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001240 /// If a word is unused its result will be NoClass; if a type should
1241 /// be passed in Memory then at least the classification of \arg Lo
1242 /// will be Memory.
1243 ///
Sylvestre Ledru33b5baf2012-09-27 10:16:10 +00001244 /// The \arg Lo class will be NoClass iff the argument is ignored.
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001245 ///
1246 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
1247 /// also be ComplexX87.
Eli Friedman96fd2642013-06-12 00:13:45 +00001248 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
1249 bool isNamedArg) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001250
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001251 llvm::Type *GetByteVectorType(QualType Ty) const;
Chris Lattnera5f58b02011-07-09 17:41:47 +00001252 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
1253 unsigned IROffset, QualType SourceTy,
1254 unsigned SourceOffset) const;
1255 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
1256 unsigned IROffset, QualType SourceTy,
1257 unsigned SourceOffset) const;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001258
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001259  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable result
Daniel Dunbar53fac692010-04-21 19:49:55 +00001260 /// such that the argument will be returned in memory.
Chris Lattner22a931e2010-06-29 06:01:59 +00001261 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
Daniel Dunbar53fac692010-04-21 19:49:55 +00001262
1263  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001264 /// such that the argument will be passed in memory.
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00001265 ///
1266 /// \param freeIntRegs - The number of free integer registers remaining
1267 /// available.
1268 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001269
Chris Lattner458b2aa2010-07-29 02:16:43 +00001270 ABIArgInfo classifyReturnType(QualType RetTy) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001271
Bill Wendling5cd41c42010-10-18 03:41:31 +00001272 ABIArgInfo classifyArgumentType(QualType Ty,
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00001273 unsigned freeIntRegs,
Bill Wendling5cd41c42010-10-18 03:41:31 +00001274 unsigned &neededInt,
Eli Friedman96fd2642013-06-12 00:13:45 +00001275 unsigned &neededSSE,
1276 bool isNamedArg) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001277
Eli Friedmanbfd5add2011-12-02 00:11:43 +00001278 bool IsIllegalVectorType(QualType Ty) const;
1279
John McCalle0fda732011-04-21 01:20:55 +00001280 /// The 0.98 ABI revision clarified a lot of ambiguities,
1281 /// unfortunately in ways that were not always consistent with
1282 /// certain previous compilers. In particular, platforms which
1283 /// required strict binary compatibility with older versions of GCC
1284 /// may need to exempt themselves.
1285 bool honorsRevision0_98() const {
John McCallc8e01702013-04-16 22:48:15 +00001286 return !getTarget().getTriple().isOSDarwin();
John McCalle0fda732011-04-21 01:20:55 +00001287 }
1288
Eli Friedmanbfd5add2011-12-02 00:11:43 +00001289 bool HasAVX;
Derek Schuffc7dd7222012-10-11 15:52:22 +00001290 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
1291 // 64-bit hardware.
1292 bool Has64BitPointers;
Eli Friedmanbfd5add2011-12-02 00:11:43 +00001293
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001294public:
Eli Friedmanbfd5add2011-12-02 00:11:43 +00001295 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
Derek Schuffc7dd7222012-10-11 15:52:22 +00001296 ABIInfo(CGT), HasAVX(hasavx),
Derek Schuff8a872f32012-10-11 18:21:13 +00001297 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
Derek Schuffc7dd7222012-10-11 15:52:22 +00001298 }
Chris Lattner22a931e2010-06-29 06:01:59 +00001299
John McCalla729c622012-02-17 03:33:10 +00001300 bool isPassedUsingAVXType(QualType type) const {
1301 unsigned neededInt, neededSSE;
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00001302 // The freeIntRegs argument doesn't matter here.
Eli Friedman96fd2642013-06-12 00:13:45 +00001303 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1304 /*isNamedArg*/true);
John McCalla729c622012-02-17 03:33:10 +00001305 if (info.isDirect()) {
1306 llvm::Type *ty = info.getCoerceToType();
1307 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1308 return (vectorTy->getBitWidth() > 128);
1309 }
1310 return false;
1311 }
1312
Craig Topper4f12f102014-03-12 06:41:41 +00001313 void computeInfo(CGFunctionInfo &FI) const override;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001314
Craig Topper4f12f102014-03-12 06:41:41 +00001315 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1316 CodeGenFunction &CGF) const override;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001317};
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00001318
Chris Lattner04dc9572010-08-31 16:44:54 +00001319/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00001320class WinX86_64ABIInfo : public ABIInfo {
1321
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00001322 ABIArgInfo classify(QualType Ty, bool IsReturnType) const;
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00001323
Chris Lattner04dc9572010-08-31 16:44:54 +00001324public:
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00001325 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
1326
Craig Topper4f12f102014-03-12 06:41:41 +00001327 void computeInfo(CGFunctionInfo &FI) const override;
Chris Lattner04dc9572010-08-31 16:44:54 +00001328
Craig Topper4f12f102014-03-12 06:41:41 +00001329 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1330 CodeGenFunction &CGF) const override;
Chris Lattner04dc9572010-08-31 16:44:54 +00001331};
1332
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00001333class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1334public:
Eli Friedmanbfd5add2011-12-02 00:11:43 +00001335 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
Derek Schuffc7dd7222012-10-11 15:52:22 +00001336 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
John McCallbeec5a02010-03-06 00:35:14 +00001337
John McCalla729c622012-02-17 03:33:10 +00001338 const X86_64ABIInfo &getABIInfo() const {
1339 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
1340 }
1341
Craig Topper4f12f102014-03-12 06:41:41 +00001342 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
John McCallbeec5a02010-03-06 00:35:14 +00001343 return 7;
1344 }
1345
1346 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00001347 llvm::Value *Address) const override {
Chris Lattnerece04092012-02-07 00:39:47 +00001348 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001349
John McCall943fae92010-05-27 06:19:26 +00001350 // 0-15 are the 16 integer registers.
1351 // 16 is %rip.
Chris Lattnerece04092012-02-07 00:39:47 +00001352 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
John McCallbeec5a02010-03-06 00:35:14 +00001353 return false;
1354 }
Peter Collingbourne8f5cf742011-02-19 23:03:58 +00001355
Jay Foad7c57be32011-07-11 09:56:20 +00001356 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
Chris Lattner0e62c1c2011-07-23 10:55:15 +00001357 StringRef Constraint,
Craig Topper4f12f102014-03-12 06:41:41 +00001358 llvm::Type* Ty) const override {
Peter Collingbourne8f5cf742011-02-19 23:03:58 +00001359 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1360 }
1361
John McCalla729c622012-02-17 03:33:10 +00001362 bool isNoProtoCallVariadic(const CallArgList &args,
Craig Topper4f12f102014-03-12 06:41:41 +00001363 const FunctionNoProtoType *fnType) const override {
John McCallcbc038a2011-09-21 08:08:30 +00001364    // The default CC on x86-64 sets %al to the number of SSE
1365 // registers used, and GCC sets this when calling an unprototyped
Eli Friedmanf37bd2f2011-12-01 04:53:19 +00001366 // function, so we override the default behavior. However, don't do
Eli Friedmanb8e45b22011-12-06 03:08:26 +00001367 // that when AVX types are involved: the ABI explicitly states it is
1368 // undefined, and it doesn't work in practice because of how the ABI
1369 // defines varargs anyway.
Reid Kleckner78af0702013-08-27 23:08:25 +00001370 if (fnType->getCallConv() == CC_C) {
Eli Friedmanf37bd2f2011-12-01 04:53:19 +00001371 bool HasAVXType = false;
John McCalla729c622012-02-17 03:33:10 +00001372 for (CallArgList::const_iterator
1373 it = args.begin(), ie = args.end(); it != ie; ++it) {
1374 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
1375 HasAVXType = true;
1376 break;
Eli Friedmanf37bd2f2011-12-01 04:53:19 +00001377 }
1378 }
John McCalla729c622012-02-17 03:33:10 +00001379
Eli Friedmanf37bd2f2011-12-01 04:53:19 +00001380 if (!HasAVXType)
1381 return true;
1382 }
John McCallcbc038a2011-09-21 08:08:30 +00001383
John McCalla729c622012-02-17 03:33:10 +00001384 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
John McCallcbc038a2011-09-21 08:08:30 +00001385 }
1386
Craig Topper4f12f102014-03-12 06:41:41 +00001387 llvm::Constant *
1388 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
Peter Collingbourneb453cd62013-10-20 21:29:19 +00001389 unsigned Sig = (0xeb << 0) | // jmp rel8
1390 (0x0a << 8) | // .+0x0c
1391 ('F' << 16) |
1392 ('T' << 24);
1393 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1394 }
1395
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00001396};
1397
Aaron Ballmanef50ee92013-05-24 15:06:56 +00001398static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1399 // If the argument does not end in .lib, automatically add the suffix. This
1400 // matches the behavior of MSVC.
1401 std::string ArgStr = Lib;
Rui Ueyama727025a2013-10-31 19:12:53 +00001402 if (!Lib.endswith_lower(".lib"))
Aaron Ballmanef50ee92013-05-24 15:06:56 +00001403 ArgStr += ".lib";
Aaron Ballmanef50ee92013-05-24 15:06:56 +00001404 return ArgStr;
1405}
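//
// Example behavior (illustrative):
//
//   qualifyWindowsLibrary("msvcrt")     -> "msvcrt.lib"
//   qualifyWindowsLibrary("msvcrt.lib") -> "msvcrt.lib"   (suffix kept as-is)
//
// The subclasses below use this to turn e.g. #pragma comment(lib, "msvcrt")
// into the "/DEFAULTLIB:msvcrt.lib" linker directive.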
1406
Reid Klecknere43f0fe2013-05-08 13:44:39 +00001407class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
1408public:
John McCall1fe2a8c2013-06-18 02:46:29 +00001409 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1410 bool d, bool p, bool w, unsigned RegParms)
1411 : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}
Reid Klecknere43f0fe2013-05-08 13:44:39 +00001412
1413 void getDependentLibraryOption(llvm::StringRef Lib,
Craig Topper4f12f102014-03-12 06:41:41 +00001414 llvm::SmallString<24> &Opt) const override {
Reid Klecknere43f0fe2013-05-08 13:44:39 +00001415 Opt = "/DEFAULTLIB:";
Aaron Ballmanef50ee92013-05-24 15:06:56 +00001416 Opt += qualifyWindowsLibrary(Lib);
Reid Klecknere43f0fe2013-05-08 13:44:39 +00001417 }
Aaron Ballman5d041be2013-06-04 02:07:14 +00001418
1419 void getDetectMismatchOption(llvm::StringRef Name,
1420 llvm::StringRef Value,
Craig Topper4f12f102014-03-12 06:41:41 +00001421 llvm::SmallString<32> &Opt) const override {
Eli Friedmanf60b8ce2013-06-07 22:42:22 +00001422 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
Aaron Ballman5d041be2013-06-04 02:07:14 +00001423 }
Reid Klecknere43f0fe2013-05-08 13:44:39 +00001424};
1425
Chris Lattner04dc9572010-08-31 16:44:54 +00001426class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1427public:
1428 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
1429 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
1430
Craig Topper4f12f102014-03-12 06:41:41 +00001431 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
Chris Lattner04dc9572010-08-31 16:44:54 +00001432 return 7;
1433 }
1434
1435 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00001436 llvm::Value *Address) const override {
Chris Lattnerece04092012-02-07 00:39:47 +00001437 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
Michael J. Spencerf5a1fbc2010-10-19 06:39:39 +00001438
Chris Lattner04dc9572010-08-31 16:44:54 +00001439 // 0-15 are the 16 integer registers.
1440 // 16 is %rip.
Chris Lattnerece04092012-02-07 00:39:47 +00001441 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
Chris Lattner04dc9572010-08-31 16:44:54 +00001442 return false;
1443 }
Reid Klecknere43f0fe2013-05-08 13:44:39 +00001444
1445 void getDependentLibraryOption(llvm::StringRef Lib,
Craig Topper4f12f102014-03-12 06:41:41 +00001446 llvm::SmallString<24> &Opt) const override {
Reid Klecknere43f0fe2013-05-08 13:44:39 +00001447 Opt = "/DEFAULTLIB:";
Aaron Ballmanef50ee92013-05-24 15:06:56 +00001448 Opt += qualifyWindowsLibrary(Lib);
Reid Klecknere43f0fe2013-05-08 13:44:39 +00001449 }
Aaron Ballman5d041be2013-06-04 02:07:14 +00001450
1451 void getDetectMismatchOption(llvm::StringRef Name,
1452 llvm::StringRef Value,
Craig Topper4f12f102014-03-12 06:41:41 +00001453 llvm::SmallString<32> &Opt) const override {
Eli Friedmanf60b8ce2013-06-07 22:42:22 +00001454 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
Aaron Ballman5d041be2013-06-04 02:07:14 +00001455 }
Chris Lattner04dc9572010-08-31 16:44:54 +00001456};
1457
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001458}
1459
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001460void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
1461 Class &Hi) const {
1462 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
1463 //
1464 // (a) If one of the classes is Memory, the whole argument is passed in
1465 // memory.
1466 //
1467 // (b) If X87UP is not preceded by X87, the whole argument is passed in
1468 // memory.
1469 //
1470 // (c) If the size of the aggregate exceeds two eightbytes and the first
1471 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
1472 // argument is passed in memory. NOTE: This is necessary to keep the
1473 // ABI working for processors that don't support the __m256 type.
1474 //
1475 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
1476 //
1477 // Some of these are enforced by the merging logic. Others can arise
1478 // only with unions; for example:
1479 // union { _Complex double; unsigned; }
1480 //
1481 // Note that clauses (b) and (c) were added in 0.98.
1482 //
1483 if (Hi == Memory)
1484 Lo = Memory;
1485 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
1486 Lo = Memory;
1487 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
1488 Lo = Memory;
1489 if (Hi == SSEUp && Lo != SSE)
1490 Hi = SSE;
1491}
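//
// Worked example of clause (b) above (a sketch, not ABI text):
//
//   union U { long double ld; int i; };   // 16 bytes on x86-64
//
// The long double member classifies as (X87, X87Up) and the int member as
// INTEGER, so merging the fields leaves Lo = Integer, Hi = X87Up. X87Up is
// then no longer preceded by X87, so postMerge demotes Lo to Memory and the
// union is passed in memory (on targets honoring revision 0.98).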
1492
Chris Lattnerd776fb12010-06-28 21:43:59 +00001493X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001494 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
1495 // classified recursively so that always two fields are
1496 // considered. The resulting class is calculated according to
1497 // the classes of the fields in the eightbyte:
1498 //
1499 // (a) If both classes are equal, this is the resulting class.
1500 //
1501 // (b) If one of the classes is NO_CLASS, the resulting class is
1502 // the other class.
1503 //
1504 // (c) If one of the classes is MEMORY, the result is the MEMORY
1505 // class.
1506 //
1507 // (d) If one of the classes is INTEGER, the result is the
1508 // INTEGER.
1509 //
1510 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
1511 // MEMORY is used as class.
1512 //
1513 // (f) Otherwise class SSE is used.
1514
1515 // Accum should never be memory (we should have returned) or
1516 // ComplexX87 (because this cannot be passed in a structure).
1517 assert((Accum != Memory && Accum != ComplexX87) &&
1518 "Invalid accumulated classification during merge.");
1519 if (Accum == Field || Field == NoClass)
1520 return Accum;
Chris Lattnerd776fb12010-06-28 21:43:59 +00001521 if (Field == Memory)
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001522 return Memory;
Chris Lattnerd776fb12010-06-28 21:43:59 +00001523 if (Accum == NoClass)
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001524 return Field;
Chris Lattnerd776fb12010-06-28 21:43:59 +00001525 if (Accum == Integer || Field == Integer)
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001526 return Integer;
Chris Lattnerd776fb12010-06-28 21:43:59 +00001527 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
1528 Accum == X87 || Accum == X87Up)
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001529 return Memory;
Chris Lattnerd776fb12010-06-28 21:43:59 +00001530 return SSE;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001531}
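//
// Small worked example of these rules (illustrative):
//
//   struct P { int i; float f; };   // both fields land in one eightbyte
//
// The int contributes INTEGER and the float contributes SSE to the same
// eightbyte; rule (d) wins, the merged class is INTEGER, and the whole
// struct travels in a single general-purpose register.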
1532
Chris Lattner5c740f12010-06-30 19:14:05 +00001533void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Eli Friedman96fd2642013-06-12 00:13:45 +00001534 Class &Lo, Class &Hi, bool isNamedArg) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001535 // FIXME: This code can be simplified by introducing a simple value class for
1536 // Class pairs with appropriate constructor methods for the various
1537 // situations.
1538
1539 // FIXME: Some of the split computations are wrong; unaligned vectors
1540 // shouldn't be passed in registers for example, so there is no chance they
1541 // can straddle an eightbyte. Verify & simplify.
1542
1543 Lo = Hi = NoClass;
1544
1545 Class &Current = OffsetBase < 64 ? Lo : Hi;
1546 Current = Memory;
1547
John McCall9dd450b2009-09-21 23:43:11 +00001548 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001549 BuiltinType::Kind k = BT->getKind();
1550
1551 if (k == BuiltinType::Void) {
1552 Current = NoClass;
1553 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
1554 Lo = Integer;
1555 Hi = Integer;
1556 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
1557 Current = Integer;
Derek Schuff57b7e8f2012-10-11 16:55:58 +00001558 } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
1559 (k == BuiltinType::LongDouble &&
Cameron Esfahani556d91e2013-09-14 01:09:11 +00001560 getTarget().getTriple().isOSNaCl())) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001561 Current = SSE;
1562 } else if (k == BuiltinType::LongDouble) {
1563 Lo = X87;
1564 Hi = X87Up;
1565 }
1566 // FIXME: _Decimal32 and _Decimal64 are SSE.
1567 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
Chris Lattnerd776fb12010-06-28 21:43:59 +00001568 return;
1569 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001570
Chris Lattnerd776fb12010-06-28 21:43:59 +00001571 if (const EnumType *ET = Ty->getAs<EnumType>()) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001572 // Classify the underlying integer type.
Eli Friedman96fd2642013-06-12 00:13:45 +00001573 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
Chris Lattnerd776fb12010-06-28 21:43:59 +00001574 return;
1575 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001576
Chris Lattnerd776fb12010-06-28 21:43:59 +00001577 if (Ty->hasPointerRepresentation()) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001578 Current = Integer;
Chris Lattnerd776fb12010-06-28 21:43:59 +00001579 return;
1580 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001581
Chris Lattnerd776fb12010-06-28 21:43:59 +00001582 if (Ty->isMemberPointerType()) {
Derek Schuffc7dd7222012-10-11 15:52:22 +00001583 if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
Daniel Dunbar36d4d152010-05-15 00:00:37 +00001584 Lo = Hi = Integer;
1585 else
1586 Current = Integer;
Chris Lattnerd776fb12010-06-28 21:43:59 +00001587 return;
1588 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001589
Chris Lattnerd776fb12010-06-28 21:43:59 +00001590 if (const VectorType *VT = Ty->getAs<VectorType>()) {
Chris Lattner2b037972010-07-29 02:01:43 +00001591 uint64_t Size = getContext().getTypeSize(VT);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001592 if (Size == 32) {
1593 // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
1594 // float> as integer.
1595 Current = Integer;
1596
1597 // If this type crosses an eightbyte boundary, it should be
1598 // split.
1599 uint64_t EB_Real = (OffsetBase) / 64;
1600 uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
1601 if (EB_Real != EB_Imag)
1602 Hi = Lo;
1603 } else if (Size == 64) {
1604 // gcc passes <1 x double> in memory. :(
1605 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
1606 return;
1607
1608 // gcc passes <1 x long long> as INTEGER.
Chris Lattner46830f22010-08-26 18:03:20 +00001609 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
Chris Lattner69e683f2010-08-26 18:13:50 +00001610 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
1611 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
1612 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001613 Current = Integer;
1614 else
1615 Current = SSE;
1616
1617 // If this type crosses an eightbyte boundary, it should be
1618 // split.
1619 if (OffsetBase && OffsetBase != 64)
1620 Hi = Lo;
Eli Friedman96fd2642013-06-12 00:13:45 +00001621 } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001622 // Arguments of 256-bits are split into four eightbyte chunks. The
1623 // least significant one belongs to class SSE and all the others to class
1624    // least significant one belongs to class SSE and all the others to class
1625    // SSEUP. The original Lo and Hi design considers that types can't be
1626    // greater than 128 bits, so a 64-bit split in Hi and Lo makes sense.
1627    // This design isn't correct for 256 bits, but since there are no cases
1627 // where the upper parts would need to be inspected, avoid adding
1628 // complexity and just consider Hi to match the 64-256 part.
Eli Friedman96fd2642013-06-12 00:13:45 +00001629 //
1630 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
1631 // registers if they are "named", i.e. not part of the "..." of a
1632 // variadic function.
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001633 Lo = SSE;
1634 Hi = SSEUp;
1635 }
Chris Lattnerd776fb12010-06-28 21:43:59 +00001636 return;
1637 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001638
Chris Lattnerd776fb12010-06-28 21:43:59 +00001639 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
Chris Lattner2b037972010-07-29 02:01:43 +00001640 QualType ET = getContext().getCanonicalType(CT->getElementType());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001641
Chris Lattner2b037972010-07-29 02:01:43 +00001642 uint64_t Size = getContext().getTypeSize(Ty);
Douglas Gregorb90df602010-06-16 00:17:44 +00001643 if (ET->isIntegralOrEnumerationType()) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001644 if (Size <= 64)
1645 Current = Integer;
1646 else if (Size <= 128)
1647 Lo = Hi = Integer;
Chris Lattner2b037972010-07-29 02:01:43 +00001648 } else if (ET == getContext().FloatTy)
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001649 Current = SSE;
Derek Schuff57b7e8f2012-10-11 16:55:58 +00001650 else if (ET == getContext().DoubleTy ||
1651 (ET == getContext().LongDoubleTy &&
Cameron Esfahani556d91e2013-09-14 01:09:11 +00001652 getTarget().getTriple().isOSNaCl()))
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001653 Lo = Hi = SSE;
Chris Lattner2b037972010-07-29 02:01:43 +00001654 else if (ET == getContext().LongDoubleTy)
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001655 Current = ComplexX87;
1656
1657 // If this complex type crosses an eightbyte boundary then it
1658 // should be split.
1659 uint64_t EB_Real = (OffsetBase) / 64;
Chris Lattner2b037972010-07-29 02:01:43 +00001660 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001661 if (Hi == NoClass && EB_Real != EB_Imag)
1662 Hi = Lo;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001663
Chris Lattnerd776fb12010-06-28 21:43:59 +00001664 return;
1665 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001666
Chris Lattner2b037972010-07-29 02:01:43 +00001667 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001668 // Arrays are treated like structures.
1669
Chris Lattner2b037972010-07-29 02:01:43 +00001670 uint64_t Size = getContext().getTypeSize(Ty);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001671
1672 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001673 // than four eightbytes, ..., it has class MEMORY.
1674 if (Size > 256)
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001675 return;
1676
1677 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1678 // fields, it has class MEMORY.
1679 //
1680 // Only need to check alignment of array base.
Chris Lattner2b037972010-07-29 02:01:43 +00001681 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001682 return;
1683
1684 // Otherwise implement simplified merge. We could be smarter about
1685 // this, but it isn't worth it and would be harder to verify.
1686 Current = NoClass;
Chris Lattner2b037972010-07-29 02:01:43 +00001687 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001688 uint64_t ArraySize = AT->getSize().getZExtValue();
Bruno Cardoso Lopes75541d02011-07-12 01:27:38 +00001689
1690 // The only case a 256-bit wide vector could be used is when the array
1691 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1692 // to work for sizes wider than 128, early check and fallback to memory.
1693 if (Size > 128 && EltSize != 256)
1694 return;
1695
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001696 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1697 Class FieldLo, FieldHi;
Eli Friedman96fd2642013-06-12 00:13:45 +00001698 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001699 Lo = merge(Lo, FieldLo);
1700 Hi = merge(Hi, FieldHi);
1701 if (Lo == Memory || Hi == Memory)
1702 break;
1703 }
1704
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001705 postMerge(Size, Lo, Hi);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001706 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
Chris Lattnerd776fb12010-06-28 21:43:59 +00001707 return;
1708 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001709
Chris Lattnerd776fb12010-06-28 21:43:59 +00001710 if (const RecordType *RT = Ty->getAs<RecordType>()) {
Chris Lattner2b037972010-07-29 02:01:43 +00001711 uint64_t Size = getContext().getTypeSize(Ty);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001712
1713 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001714 // than four eightbytes, ..., it has class MEMORY.
1715 if (Size > 256)
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001716 return;
1717
Anders Carlsson20759ad2009-09-16 15:53:40 +00001718 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1719 // copy constructor or a non-trivial destructor, it is passed by invisible
1720 // reference.
Mark Lacey3825e832013-10-06 01:33:34 +00001721 if (getRecordArgABI(RT, getCXXABI()))
Anders Carlsson20759ad2009-09-16 15:53:40 +00001722 return;
Daniel Dunbare1cd0152009-11-22 23:01:23 +00001723
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001724 const RecordDecl *RD = RT->getDecl();
1725
1726 // Assume variable sized types are passed in memory.
1727 if (RD->hasFlexibleArrayMember())
1728 return;
1729
Chris Lattner2b037972010-07-29 02:01:43 +00001730 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001731
1732 // Reset Lo class, this will be recomputed.
1733 Current = NoClass;
Daniel Dunbare1cd0152009-11-22 23:01:23 +00001734
1735 // If this is a C++ record, classify the bases first.
1736 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
Aaron Ballman574705e2014-03-13 15:41:46 +00001737 for (const auto &I : CXXRD->bases()) {
1738 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
Daniel Dunbare1cd0152009-11-22 23:01:23 +00001739 "Unexpected base class!");
1740 const CXXRecordDecl *Base =
Aaron Ballman574705e2014-03-13 15:41:46 +00001741 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
Daniel Dunbare1cd0152009-11-22 23:01:23 +00001742
1743 // Classify this field.
1744 //
1745 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1746 // single eightbyte, each is classified separately. Each eightbyte gets
1747 // initialized to class NO_CLASS.
1748 Class FieldLo, FieldHi;
Benjamin Kramer2ef30312012-07-04 18:45:14 +00001749 uint64_t Offset =
1750 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
Aaron Ballman574705e2014-03-13 15:41:46 +00001751 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
Daniel Dunbare1cd0152009-11-22 23:01:23 +00001752 Lo = merge(Lo, FieldLo);
1753 Hi = merge(Hi, FieldHi);
1754 if (Lo == Memory || Hi == Memory)
1755 break;
1756 }
1757 }
1758
1759 // Classify the fields one at a time, merging the results.
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001760 unsigned idx = 0;
Bruno Cardoso Lopes0aadf832011-07-12 22:30:58 +00001761 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
Argyrios Kyrtzidiscfbfe782009-06-30 02:36:12 +00001762 i != e; ++i, ++idx) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001763 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1764 bool BitField = i->isBitField();
1765
Bruno Cardoso Lopes98154a72011-07-13 21:58:55 +00001766 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1767 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001768 //
Bruno Cardoso Lopes98154a72011-07-13 21:58:55 +00001769 // The only case a 256-bit wide vector could be used is when the struct
1770 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1771 // to work for sizes wider than 128, early check and fallback to memory.
1772 //
1773 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
1774 Lo = Memory;
1775 return;
1776 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001777 // Note, skip this test for bit-fields, see below.
Chris Lattner2b037972010-07-29 02:01:43 +00001778 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001779 Lo = Memory;
1780 return;
1781 }
1782
1783 // Classify this field.
1784 //
1785 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
1786 // exceeds a single eightbyte, each is classified
1787 // separately. Each eightbyte gets initialized to class
1788 // NO_CLASS.
1789 Class FieldLo, FieldHi;
1790
1791 // Bit-fields require special handling, they do not force the
1792 // structure to be passed in memory even if unaligned, and
1793 // therefore they can straddle an eightbyte.
1794 if (BitField) {
1795 // Ignore padding bit-fields.
1796 if (i->isUnnamedBitfield())
1797 continue;
1798
1799 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
Richard Smithcaf33902011-10-10 18:28:20 +00001800 uint64_t Size = i->getBitWidthValue(getContext());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001801
1802 uint64_t EB_Lo = Offset / 64;
1803 uint64_t EB_Hi = (Offset + Size - 1) / 64;
Sylvestre Ledru0c4813e2013-10-06 09:54:18 +00001804
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001805 if (EB_Lo) {
1806 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
1807 FieldLo = NoClass;
1808 FieldHi = Integer;
1809 } else {
1810 FieldLo = Integer;
1811 FieldHi = EB_Hi ? Integer : NoClass;
1812 }
1813 } else
Eli Friedman96fd2642013-06-12 00:13:45 +00001814 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001815 Lo = merge(Lo, FieldLo);
1816 Hi = merge(Hi, FieldHi);
1817 if (Lo == Memory || Hi == Memory)
1818 break;
1819 }
1820
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001821 postMerge(Size, Lo, Hi);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001822 }
1823}
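//
// End-to-end illustration of the classifier (a sketch, not ABI text):
//
//   struct Q { double d; int i; };   // 16 bytes
//
// The low eightbyte (the double) classifies as SSE and the high eightbyte
// (the int plus tail padding) as INTEGER, so Q is passed in one XMM register
// and one GPR. A 32-byte aggregate that is not a single 256-bit vector
// exceeds two eightbytes and is demoted to MEMORY by postMerge clause (c).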
1824
Chris Lattner22a931e2010-06-29 06:01:59 +00001825ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
Daniel Dunbar53fac692010-04-21 19:49:55 +00001826 // If this is a scalar LLVM value then assume LLVM will pass it in the right
1827 // place naturally.
John McCalla1dee5302010-08-22 10:59:02 +00001828 if (!isAggregateTypeForABI(Ty)) {
Daniel Dunbar53fac692010-04-21 19:49:55 +00001829 // Treat an enum type as its underlying type.
1830 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1831 Ty = EnumTy->getDecl()->getIntegerType();
1832
1833 return (Ty->isPromotableIntegerType() ?
1834 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1835 }
1836
1837 return ABIArgInfo::getIndirect(0);
1838}
1839
Eli Friedmanbfd5add2011-12-02 00:11:43 +00001840bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
1841 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
1842 uint64_t Size = getContext().getTypeSize(VecTy);
1843 unsigned LargestVector = HasAVX ? 256 : 128;
1844 if (Size <= 64 || Size > LargestVector)
1845 return true;
1846 }
1847
1848 return false;
1849}
1850
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00001851ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
1852 unsigned freeIntRegs) const {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001853 // If this is a scalar LLVM value then assume LLVM will pass it in the right
1854 // place naturally.
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00001855 //
1856 // This assumption is optimistic, as there could be free registers available
1857 // when we need to pass this argument in memory, and LLVM could try to pass
1858 // the argument in the free register. This does not seem to happen currently,
1859 // but this code would be much safer if we could mark the argument with
1860 // 'onstack'. See PR12193.
Eli Friedmanbfd5add2011-12-02 00:11:43 +00001861 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
Douglas Gregora71cc152010-02-02 20:10:50 +00001862 // Treat an enum type as its underlying type.
1863 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1864 Ty = EnumTy->getDecl()->getIntegerType();
1865
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00001866 return (Ty->isPromotableIntegerType() ?
1867 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Douglas Gregora71cc152010-02-02 20:10:50 +00001868 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001869
Mark Lacey3825e832013-10-06 01:33:34 +00001870 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00001871 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Anders Carlsson20759ad2009-09-16 15:53:40 +00001872
Chris Lattner44c2b902011-05-22 23:21:23 +00001873 // Compute the byval alignment. We specify the alignment of the byval in all
1874 // cases so that the mid-level optimizer knows the alignment of the byval.
1875 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00001876
1877 // Attempt to avoid passing indirect results using byval when possible. This
1878 // is important for good codegen.
1879 //
1880 // We do this by coercing the value into a scalar type which the backend can
1881 // handle naturally (i.e., without using byval).
1882 //
1883 // For simplicity, we currently only do this when we have exhausted all of the
1884 // free integer registers. Doing this when there are free integer registers
1885 // would require more care, as we would have to ensure that the coerced value
1886  // did not claim the unused register. That would require either reordering the
1887 // arguments to the function (so that any subsequent inreg values came first),
1888 // or only doing this optimization when there were no following arguments that
1889 // might be inreg.
1890 //
1891 // We currently expect it to be rare (particularly in well written code) for
1892 // arguments to be passed on the stack when there are still free integer
1893 // registers available (this would typically imply large structs being passed
1894 // by value), so this seems like a fair tradeoff for now.
1895 //
1896 // We can revisit this if the backend grows support for 'onstack' parameter
1897 // attributes. See PR12193.
1898 if (freeIntRegs == 0) {
1899 uint64_t Size = getContext().getTypeSize(Ty);
1900
1901 // If this type fits in an eightbyte, coerce it into the matching integral
1902 // type, which will end up on the stack (with alignment 8).
1903 if (Align == 8 && Size <= 64)
1904 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1905 Size));
1906 }
1907
Chris Lattner44c2b902011-05-22 23:21:23 +00001908 return ABIArgInfo::getIndirect(Align);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00001909}
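//
// Illustration of the freeIntRegs == 0 special case above (sketch): once all
// six integer argument registers are taken, an 8-byte, 8-aligned aggregate
// such as
//
//   struct R { void *p; };
//
// is coerced to a plain i64 rather than passed byval, so the backend can
// drop it straight into its natural 8-byte stack slot without the extra
// copy that byval implies.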
1910
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001911/// GetByteVectorType - The ABI specifies that a value should be passed in a
1912/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
Chris Lattner4200fe42010-07-29 04:56:46 +00001913/// vector register.
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001914llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
Chris Lattnera5f58b02011-07-09 17:41:47 +00001915 llvm::Type *IRType = CGT.ConvertType(Ty);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001916
Chris Lattner9fa15c32010-07-29 05:02:29 +00001917 // Wrapper structs that just contain vectors are passed just like vectors,
1918 // strip them off if present.
Chris Lattnera5f58b02011-07-09 17:41:47 +00001919 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
Chris Lattner9fa15c32010-07-29 05:02:29 +00001920 while (STy && STy->getNumElements() == 1) {
1921 IRType = STy->getElementType(0);
1922 STy = dyn_cast<llvm::StructType>(IRType);
1923 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001924
Bruno Cardoso Lopes129b4cc2011-07-08 22:57:35 +00001925 // If the preferred type is a 16-byte vector, prefer to pass it.
Chris Lattnera5f58b02011-07-09 17:41:47 +00001926 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1927 llvm::Type *EltTy = VT->getElementType();
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00001928 unsigned BitWidth = VT->getBitWidth();
Tanya Lattner71f1b2d2011-11-28 23:18:11 +00001929 if ((BitWidth >= 128 && BitWidth <= 256) &&
Chris Lattner4200fe42010-07-29 04:56:46 +00001930 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1931 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1932 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
1933 EltTy->isIntegerTy(128)))
1934 return VT;
1935 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001936
Chris Lattner4200fe42010-07-29 04:56:46 +00001937 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
1938}
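//
// Example of the wrapper stripping above (illustrative):
//
//   typedef float v4sf __attribute__((vector_size(16)));
//   struct W { v4sf v; };             // IR type { <4 x float> }
//
// The single-element struct is peeled away and the <4 x float> itself is
// returned, so W is passed in an XMM register exactly like the bare vector.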
1939
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001940/// BitsContainNoUserData - Return true if the specified [start,end) bit range
1941/// is known to either be off the end of the specified type or be in
1942/// alignment padding. The user type specified is known to be at most 128 bits
1943/// in size, and have passed through X86_64ABIInfo::classify with a successful
1944/// classification that put one of the two halves in the INTEGER class.
1945///
1946/// It is conservatively correct to return false.
1947static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1948 unsigned EndBit, ASTContext &Context) {
1949 // If the bytes being queried are off the end of the type, there is no user
1950 // data hiding here. This handles analysis of builtins, vectors and other
1951 // types that don't contain interesting padding.
1952 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
1953 if (TySize <= StartBit)
1954 return true;
1955
Chris Lattner98076a22010-07-29 07:43:55 +00001956 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1957 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1958 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1959
1960 // Check each element to see if the element overlaps with the queried range.
1961 for (unsigned i = 0; i != NumElts; ++i) {
1962    // If the element is after the span we care about, then we're done.
1963 unsigned EltOffset = i*EltSize;
1964 if (EltOffset >= EndBit) break;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001965
Chris Lattner98076a22010-07-29 07:43:55 +00001966 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
1967 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
1968 EndBit-EltOffset, Context))
1969 return false;
1970 }
1971 // If it overlaps no elements, then it is safe to process as padding.
1972 return true;
1973 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001974
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001975 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1976 const RecordDecl *RD = RT->getDecl();
1977 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001978
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001979 // If this is a C++ record, check the bases first.
1980 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
Aaron Ballman574705e2014-03-13 15:41:46 +00001981 for (const auto &I : CXXRD->bases()) {
1982 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001983 "Unexpected base class!");
1984 const CXXRecordDecl *Base =
Aaron Ballman574705e2014-03-13 15:41:46 +00001985 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001986
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001987 // If the base is after the span we care about, ignore it.
Benjamin Kramer2ef30312012-07-04 18:45:14 +00001988 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001989 if (BaseOffset >= EndBit) continue;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001990
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001991 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
Aaron Ballman574705e2014-03-13 15:41:46 +00001992 if (!BitsContainNoUserData(I.getType(), BaseStart,
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001993 EndBit-BaseOffset, Context))
1994 return false;
1995 }
1996 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00001997
Chris Lattnerc8b7b532010-07-29 07:30:00 +00001998 // Verify that no field has data that overlaps the region of interest. Yes
1999 // this could be sped up a lot by being smarter about queried fields,
2000 // however we're only looking at structs up to 16 bytes, so we don't care
2001 // much.
2002 unsigned idx = 0;
2003 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2004 i != e; ++i, ++idx) {
2005 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002006
Chris Lattnerc8b7b532010-07-29 07:30:00 +00002007 // If we found a field after the region we care about, then we're done.
2008 if (FieldOffset >= EndBit) break;
2009
2010 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2011 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
2012 Context))
2013 return false;
2014 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002015
Chris Lattnerc8b7b532010-07-29 07:30:00 +00002016 // If nothing in this record overlapped the area of interest, then we're
2017 // clean.
2018 return true;
2019 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002020
Chris Lattnerc8b7b532010-07-29 07:30:00 +00002021 return false;
2022}
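//
// Worked query against the helper above (illustrative):
//
//   struct D { double d; int i; };   // 16 bytes; bits [96,128) are padding
//
// BitsContainNoUserData(D, 96, 128, Ctx) visits both fields, finds that the
// double ends at bit 64 and the int at bit 96, and returns true -- which is
// what later allows the high eightbyte of D to be passed as a plain i32.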
2023
Chris Lattnere556a712010-07-29 18:39:32 +00002024/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2025/// float member at the specified offset. For example, {int,{float}} has a
2026/// float at offset 4. It is conservatively correct for this routine to return
2027/// false.
Chris Lattner2192fe52011-07-18 04:24:23 +00002028static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
Micah Villmowdd31ca12012-10-08 16:25:52 +00002029 const llvm::DataLayout &TD) {
Chris Lattnere556a712010-07-29 18:39:32 +00002030 // Base case if we find a float.
2031 if (IROffset == 0 && IRType->isFloatTy())
2032 return true;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002033
Chris Lattnere556a712010-07-29 18:39:32 +00002034 // If this is a struct, recurse into the field at the specified offset.
Chris Lattner2192fe52011-07-18 04:24:23 +00002035 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
Chris Lattnere556a712010-07-29 18:39:32 +00002036 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2037 unsigned Elt = SL->getElementContainingOffset(IROffset);
2038 IROffset -= SL->getElementOffset(Elt);
2039 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2040 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002041
Chris Lattnere556a712010-07-29 18:39:32 +00002042 // If this is an array, recurse into the field at the specified offset.
Chris Lattner2192fe52011-07-18 04:24:23 +00002043 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2044 llvm::Type *EltTy = ATy->getElementType();
Chris Lattnere556a712010-07-29 18:39:32 +00002045 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2046 IROffset -= IROffset/EltSize*EltSize;
2047 return ContainsFloatAtOffset(EltTy, IROffset, TD);
2048 }
2049
2050 return false;
2051}
2052
Chris Lattner7f4b81a2010-07-29 18:13:09 +00002053
2054/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2055/// low 8 bytes of an XMM register, corresponding to the SSE class.
Chris Lattnera5f58b02011-07-09 17:41:47 +00002056llvm::Type *X86_64ABIInfo::
2057GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
Chris Lattner7f4b81a2010-07-29 18:13:09 +00002058 QualType SourceTy, unsigned SourceOffset) const {
Chris Lattner50a357e2010-07-29 18:19:50 +00002059 // The only three choices we have are either double, <2 x float>, or float. We
Chris Lattner7f4b81a2010-07-29 18:13:09 +00002060 // pass as float if the last 4 bytes is just padding. This happens for
2061 // structs that contain 3 floats.
2062 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2063 SourceOffset*8+64, getContext()))
2064 return llvm::Type::getFloatTy(getVMContext());
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002065
Chris Lattnere556a712010-07-29 18:39:32 +00002066 // We want to pass as <2 x float> if the LLVM IR type contains a float at
2067 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
2068 // case.
Micah Villmowdd31ca12012-10-08 16:25:52 +00002069 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2070 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
Chris Lattner9f8b4512010-08-25 23:39:14 +00002071 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002072
Chris Lattner7f4b81a2010-07-29 18:13:09 +00002073 return llvm::Type::getDoubleTy(getVMContext());
2074}
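//
// Illustration of the three outcomes above (a sketch, not normative):
//
//   struct F3 { float x, y, z; };   // classifies as SSE + SSE
//
// The low eightbyte holds two floats and becomes <2 x float>; the high
// eightbyte holds one float followed by tail padding and becomes a plain
// float; anything else falls back to double.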
2075
2076
Chris Lattner1c56d9a2010-07-29 17:40:35 +00002077/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2078/// an 8-byte GPR. This means that we either have a scalar or we are talking
2079/// about the high or low part of an up-to-16-byte struct. This routine picks
2080/// the best LLVM IR type to represent this, which may be i64 or may be anything
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002081/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
2082/// etc).
2083///
2084/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
2085/// the source type. IROffset is an offset in bytes into the LLVM IR type that
2086/// the 8-byte value references. PrefType may be null.
2087///
2088/// SourceTy is the source level type for the entire argument. SourceOffset is
2089/// an offset into this that we're processing (which is always either 0 or 8).
2090///
Chris Lattnera5f58b02011-07-09 17:41:47 +00002091llvm::Type *X86_64ABIInfo::
2092GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
Chris Lattner1c56d9a2010-07-29 17:40:35 +00002093 QualType SourceTy, unsigned SourceOffset) const {
Chris Lattnerc8b7b532010-07-29 07:30:00 +00002094 // If we're dealing with an un-offset LLVM IR type, then it means that we're
2095 // returning an 8-byte unit starting with it. See if we can safely use it.
2096 if (IROffset == 0) {
2097 // Pointers and int64's always fill the 8-byte unit.
Derek Schuffc7dd7222012-10-11 15:52:22 +00002098 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2099 IRType->isIntegerTy(64))
Chris Lattnerc8b7b532010-07-29 07:30:00 +00002100 return IRType;
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002101
Chris Lattnerc8b7b532010-07-29 07:30:00 +00002102 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2103 // goodness in the source type is just tail padding. This is allowed to
2104 // kick in for struct {double,int} on the int, but not on
2105 // struct{double,int,int} because we wouldn't return the second int. We
2106 // have to do this analysis on the source type because we can't depend on
2107 // unions being lowered a specific way etc.
2108 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
Derek Schuffc7dd7222012-10-11 15:52:22 +00002109 IRType->isIntegerTy(32) ||
2110 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2111 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2112 cast<llvm::IntegerType>(IRType)->getBitWidth();
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002113
Chris Lattnerc8b7b532010-07-29 07:30:00 +00002114 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2115 SourceOffset*8+64, getContext()))
2116 return IRType;
2117 }
2118 }
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002119
Chris Lattner2192fe52011-07-18 04:24:23 +00002120 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002121 // If this is a struct, recurse into the field at the specified offset.
Micah Villmowdd31ca12012-10-08 16:25:52 +00002122 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002123 if (IROffset < SL->getSizeInBytes()) {
2124 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2125 IROffset -= SL->getElementOffset(FieldIdx);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002126
Chris Lattner1c56d9a2010-07-29 17:40:35 +00002127 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2128 SourceTy, SourceOffset);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002129 }
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002130 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002131
Chris Lattner2192fe52011-07-18 04:24:23 +00002132 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
Chris Lattnera5f58b02011-07-09 17:41:47 +00002133 llvm::Type *EltTy = ATy->getElementType();
Micah Villmowdd31ca12012-10-08 16:25:52 +00002134 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
Chris Lattner98076a22010-07-29 07:43:55 +00002135 unsigned EltOffset = IROffset/EltSize*EltSize;
Chris Lattner1c56d9a2010-07-29 17:40:35 +00002136 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2137 SourceOffset);
Chris Lattner98076a22010-07-29 07:43:55 +00002138 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002139
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002140 // Okay, we don't have any better idea of what to pass, so we pass this in an
2141 // integer register that is no larger than the remaining bytes of the struct.
Chris Lattner3f763422010-07-29 17:34:39 +00002142 unsigned TySizeInBytes =
2143 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002144
Chris Lattner3f763422010-07-29 17:34:39 +00002145 assert(TySizeInBytes != SourceOffset && "Empty field?");
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002146
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002147 // It is always safe to classify this as an integer type up to i64 that
2148 // isn't larger than the structure.
Chris Lattner3f763422010-07-29 17:34:39 +00002149 return llvm::IntegerType::get(getVMContext(),
2150 std::min(TySizeInBytes-SourceOffset, 8U)*8);
Chris Lattner22a931e2010-06-29 06:01:59 +00002151}
2152
Chris Lattnerd426c8e2010-09-01 00:50:20 +00002153
2154/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2155/// be used as elements of a two register pair to pass or return, return a
2156/// first class aggregate to represent them. For example, if the low part of
2157/// a by-value argument should be passed as i32* and the high part as float,
2158/// return {i32*, float}.
Chris Lattnera5f58b02011-07-09 17:41:47 +00002159static llvm::Type *
Jay Foad7c57be32011-07-11 09:56:20 +00002160GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
Micah Villmowdd31ca12012-10-08 16:25:52 +00002161 const llvm::DataLayout &TD) {
Chris Lattnerd426c8e2010-09-01 00:50:20 +00002162 // In order to correctly satisfy the ABI, we need the high part to start
2163 // at offset 8. If the high and low parts we inferred are both 4-byte types
2164 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2165 // the second element at offset 8. Check for this:
2166 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2167 unsigned HiAlign = TD.getABITypeAlignment(Hi);
Micah Villmowdd31ca12012-10-08 16:25:52 +00002168 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
Chris Lattnerd426c8e2010-09-01 00:50:20 +00002169 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
Michael J. Spencerf5a1fbc2010-10-19 06:39:39 +00002170
Chris Lattnerd426c8e2010-09-01 00:50:20 +00002171 // To handle this, we have to increase the size of the low part so that the
2172 // second element will start at an 8 byte offset. We can't increase the size
2173 // of the second element because it might make us access off the end of the
2174 // struct.
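  // For example (a sketch of the mechanics): if the low half was inferred as
  // i32 and the high half as float, {i32, float} would place the float at
  // offset 4; widening the i32 to i64 below yields {i64, float}, which puts
  // the float at the required offset 8.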
2175 if (HiStart != 8) {
2176 // There are only two sorts of types the ABI generation code can produce for
2177 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2178 // Promote these to a larger type.
2179 if (Lo->isFloatTy())
2180 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2181 else {
2182 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2183 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2184 }
2185 }
Michael J. Spencerf5a1fbc2010-10-19 06:39:39 +00002186
Chris Lattnera5f58b02011-07-09 17:41:47 +00002187 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
Michael J. Spencerf5a1fbc2010-10-19 06:39:39 +00002188
2189
Chris Lattnerd426c8e2010-09-01 00:50:20 +00002190 // Verify that the second element is at an 8-byte offset.
2191 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2192 "Invalid x86-64 argument pair!");
2193 return Result;
2194}
2195
Chris Lattner31faff52010-07-28 23:06:14 +00002196ABIArgInfo X86_64ABIInfo::
Chris Lattner458b2aa2010-07-29 02:16:43 +00002197classifyReturnType(QualType RetTy) const {
Chris Lattner31faff52010-07-28 23:06:14 +00002198 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2199 // classification algorithm.
2200 X86_64ABIInfo::Class Lo, Hi;
Eli Friedman96fd2642013-06-12 00:13:45 +00002201 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
Chris Lattner31faff52010-07-28 23:06:14 +00002202
2203 // Check some invariants.
2204 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
Chris Lattner31faff52010-07-28 23:06:14 +00002205 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2206
Chris Lattnera5f58b02011-07-09 17:41:47 +00002207 llvm::Type *ResType = 0;
Chris Lattner31faff52010-07-28 23:06:14 +00002208 switch (Lo) {
2209 case NoClass:
Chris Lattner8a2f3c72010-07-30 04:02:24 +00002210 if (Hi == NoClass)
2211 return ABIArgInfo::getIgnore();
2212 // If the low part is just padding, it takes no register; leave ResType
2213 // null.
2214 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2215 "Unknown missing lo part");
2216 break;
Chris Lattner31faff52010-07-28 23:06:14 +00002217
2218 case SSEUp:
2219 case X87Up:
David Blaikie83d382b2011-09-23 05:06:16 +00002220 llvm_unreachable("Invalid classification for lo word.");
Chris Lattner31faff52010-07-28 23:06:14 +00002221
2222 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2223 // hidden argument.
2224 case Memory:
2225 return getIndirectReturnResult(RetTy);
2226
2227 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2228 // available register of the sequence %rax, %rdx is used.
2229 case Integer:
Chris Lattnera5f58b02011-07-09 17:41:47 +00002230 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002231
Chris Lattner1f3a0632010-07-29 21:42:50 +00002232 // If we have a sign or zero extended integer, make sure to return Extend
2233 // so that the parameter gets the right LLVM IR attributes.
2234 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2235 // Treat an enum type as its underlying type.
2236 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2237 RetTy = EnumTy->getDecl()->getIntegerType();
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002238
Chris Lattner1f3a0632010-07-29 21:42:50 +00002239 if (RetTy->isIntegralOrEnumerationType() &&
2240 RetTy->isPromotableIntegerType())
2241 return ABIArgInfo::getExtend();
2242 }
Chris Lattner31faff52010-07-28 23:06:14 +00002243 break;
2244
2245 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2246 // available SSE register of the sequence %xmm0, %xmm1 is used.
2247 case SSE:
Chris Lattnera5f58b02011-07-09 17:41:47 +00002248 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
Chris Lattnerfa560fe2010-07-28 23:12:33 +00002249 break;
Chris Lattner31faff52010-07-28 23:06:14 +00002250
2251 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2252 // returned on the X87 stack in %st0 as 80-bit x87 number.
2253 case X87:
Chris Lattner2b037972010-07-29 02:01:43 +00002254 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
Chris Lattnerfa560fe2010-07-28 23:12:33 +00002255 break;
Chris Lattner31faff52010-07-28 23:06:14 +00002256
2257 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2258 // part of the value is returned in %st0 and the imaginary part in
2259 // %st1.
2260 case ComplexX87:
2261 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
Chris Lattner845511f2011-06-18 22:49:11 +00002262 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
Chris Lattner2b037972010-07-29 02:01:43 +00002263 llvm::Type::getX86_FP80Ty(getVMContext()),
Chris Lattner31faff52010-07-28 23:06:14 +00002264 NULL);
2265 break;
2266 }
2267
Chris Lattnera5f58b02011-07-09 17:41:47 +00002268 llvm::Type *HighPart = 0;
Chris Lattner31faff52010-07-28 23:06:14 +00002269 switch (Hi) {
2270 // Memory was handled previously and X87 should
2271 // never occur as a hi class.
2272 case Memory:
2273 case X87:
David Blaikie83d382b2011-09-23 05:06:16 +00002274 llvm_unreachable("Invalid classification for hi word.");
Chris Lattner31faff52010-07-28 23:06:14 +00002275
2276 case ComplexX87: // Previously handled.
Chris Lattnerfa560fe2010-07-28 23:12:33 +00002277 case NoClass:
2278 break;
Chris Lattner31faff52010-07-28 23:06:14 +00002279
Chris Lattner52b3c132010-09-01 00:20:33 +00002280 case Integer:
Chris Lattnera5f58b02011-07-09 17:41:47 +00002281 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
Chris Lattner52b3c132010-09-01 00:20:33 +00002282 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2283 return ABIArgInfo::getDirect(HighPart, 8);
Chris Lattner31faff52010-07-28 23:06:14 +00002284 break;
Chris Lattner52b3c132010-09-01 00:20:33 +00002285 case SSE:
Chris Lattnera5f58b02011-07-09 17:41:47 +00002286 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
Chris Lattner52b3c132010-09-01 00:20:33 +00002287 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2288 return ABIArgInfo::getDirect(HighPart, 8);
Chris Lattner31faff52010-07-28 23:06:14 +00002289 break;
2290
2291 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00002292 // is passed in the next available eightbyte chunk of the last used
2293 // vector register.
Chris Lattner31faff52010-07-28 23:06:14 +00002294 //
Chris Lattner57540c52011-04-15 05:22:18 +00002295 // SSEUP should always be preceded by SSE, just widen.
Chris Lattner31faff52010-07-28 23:06:14 +00002296 case SSEUp:
2297 assert(Lo == SSE && "Unexpected SSEUp classification.");
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00002298 ResType = GetByteVectorType(RetTy);
Chris Lattner31faff52010-07-28 23:06:14 +00002299 break;
2300
2301 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2302 // returned together with the previous X87 value in %st0.
2303 case X87Up:
Chris Lattner57540c52011-04-15 05:22:18 +00002304 // If X87Up is preceded by X87, we don't need to do
Chris Lattner31faff52010-07-28 23:06:14 +00002305 // anything. However, in some cases with unions it may not be
Chris Lattner57540c52011-04-15 05:22:18 +00002306 // preceded by X87. In such situations we follow gcc and pass the
Chris Lattner31faff52010-07-28 23:06:14 +00002307 // extra bits in an SSE reg.
Chris Lattnerc95a3982010-07-29 17:49:08 +00002308 if (Lo != X87) {
Chris Lattnera5f58b02011-07-09 17:41:47 +00002309 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
Chris Lattner52b3c132010-09-01 00:20:33 +00002310 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2311 return ABIArgInfo::getDirect(HighPart, 8);
Chris Lattnerc95a3982010-07-29 17:49:08 +00002312 }
Chris Lattner31faff52010-07-28 23:06:14 +00002313 break;
2314 }
Michael J. Spencerf5a1fbc2010-10-19 06:39:39 +00002315
Chris Lattner52b3c132010-09-01 00:20:33 +00002316 // If a high part was specified, merge it together with the low part. It is
Chris Lattnerbe5eb172010-09-01 00:24:35 +00002317 // known to pass in the high eightbyte of the result. We do this by forming a
2318 // first class struct aggregate with the high and low part: {low, high}
Chris Lattnerd426c8e2010-09-01 00:50:20 +00002319 if (HighPart)
Micah Villmowdd31ca12012-10-08 16:25:52 +00002320 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
Chris Lattner31faff52010-07-28 23:06:14 +00002321
Chris Lattner1f3a0632010-07-29 21:42:50 +00002322 return ABIArgInfo::getDirect(ResType);
Chris Lattner31faff52010-07-28 23:06:14 +00002323}
2324
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00002325ABIArgInfo X86_64ABIInfo::classifyArgumentType(
Eli Friedman96fd2642013-06-12 00:13:45 +00002326 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
2327 bool isNamedArg)
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00002328 const
2329{
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002330 X86_64ABIInfo::Class Lo, Hi;
Eli Friedman96fd2642013-06-12 00:13:45 +00002331 classify(Ty, 0, Lo, Hi, isNamedArg);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002332
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002333 // Check some invariants.
2334 // FIXME: Enforce these by construction.
2335 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002336 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2337
2338 neededInt = 0;
2339 neededSSE = 0;
Chris Lattnera5f58b02011-07-09 17:41:47 +00002340 llvm::Type *ResType = 0;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002341 switch (Lo) {
2342 case NoClass:
Chris Lattner8a2f3c72010-07-30 04:02:24 +00002343 if (Hi == NoClass)
2344 return ABIArgInfo::getIgnore();
2345 // If the low part is just padding, it takes no register; leave ResType
2346 // null.
2347 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2348 "Unknown missing lo part");
2349 break;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002350
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002351 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2352 // on the stack.
2353 case Memory:
2354
2355 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2356 // COMPLEX_X87, it is passed in memory.
2357 case X87:
2358 case ComplexX87:
Mark Lacey3825e832013-10-06 01:33:34 +00002359 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
Eli Friedman4774b7e2011-06-29 07:04:55 +00002360 ++neededInt;
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00002361 return getIndirectResult(Ty, freeIntRegs);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002362
2363 case SSEUp:
2364 case X87Up:
David Blaikie83d382b2011-09-23 05:06:16 +00002365 llvm_unreachable("Invalid classification for lo word.");
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002366
2367 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2368 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2369 // and %r9 is used.
2370 case Integer:
Chris Lattner22a931e2010-06-29 06:01:59 +00002371 ++neededInt;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002372
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002373 // Pick an 8-byte type based on the preferred type.
Chris Lattnera5f58b02011-07-09 17:41:47 +00002374 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
Chris Lattner1f3a0632010-07-29 21:42:50 +00002375
2376 // If we have a sign or zero extended integer, make sure to return Extend
2377 // so that the parameter gets the right LLVM IR attributes.
2378 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2379 // Treat an enum type as its underlying type.
2380 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2381 Ty = EnumTy->getDecl()->getIntegerType();
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002382
Chris Lattner1f3a0632010-07-29 21:42:50 +00002383 if (Ty->isIntegralOrEnumerationType() &&
2384 Ty->isPromotableIntegerType())
2385 return ABIArgInfo::getExtend();
2386 }
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002387
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002388 break;
2389
2390 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2391 // available SSE register is used, the registers are taken in the
2392 // order from %xmm0 to %xmm7.
Bill Wendling5cd41c42010-10-18 03:41:31 +00002393 case SSE: {
Chris Lattnera5f58b02011-07-09 17:41:47 +00002394 llvm::Type *IRType = CGT.ConvertType(Ty);
Eli Friedman1310c682011-07-02 00:57:27 +00002395 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
Bill Wendling9987c0e2010-10-18 23:51:38 +00002396 ++neededSSE;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002397 break;
2398 }
Bill Wendling5cd41c42010-10-18 03:41:31 +00002399 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002400
Chris Lattnera5f58b02011-07-09 17:41:47 +00002401 llvm::Type *HighPart = 0;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002402 switch (Hi) {
2403 // Memory was handled previously, ComplexX87 and X87 should
Chris Lattner57540c52011-04-15 05:22:18 +00002404 // never occur as hi classes, and X87Up must be preceded by X87,
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002405 // which is passed in memory.
2406 case Memory:
2407 case X87:
2408 case ComplexX87:
David Blaikie83d382b2011-09-23 05:06:16 +00002409 llvm_unreachable("Invalid classification for hi word.");
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002410
2411 case NoClass: break;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002412
Chris Lattnerbe5eb172010-09-01 00:24:35 +00002413 case Integer:
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002414 ++neededInt;
Chris Lattnerb22f1c82010-07-28 22:44:07 +00002415 // Pick an 8-byte type based on the preferred type.
Chris Lattnera5f58b02011-07-09 17:41:47 +00002416 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002417
Chris Lattnerbe5eb172010-09-01 00:24:35 +00002418 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
2419 return ABIArgInfo::getDirect(HighPart, 8);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002420 break;
2421
2422 // X87Up generally doesn't occur here (long double is passed in
2423 // memory), except in situations involving unions.
2424 case X87Up:
Chris Lattnerbe5eb172010-09-01 00:24:35 +00002425 case SSE:
Chris Lattnera5f58b02011-07-09 17:41:47 +00002426 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002427
Chris Lattnerbe5eb172010-09-01 00:24:35 +00002428 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
2429 return ABIArgInfo::getDirect(HighPart, 8);
Chris Lattner8a2f3c72010-07-30 04:02:24 +00002430
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002431 ++neededSSE;
2432 break;
2433
2434 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2435 // eightbyte is passed in the upper half of the last used SSE
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002436 // register. This only happens when 128-bit vectors are passed.
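  // (Illustrative: a 128-bit __m128 argument is classified SSE for its low
  // eightbyte and SSEUp for its high eightbyte and travels in a single XMM
  // register.)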
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002437 case SSEUp:
Chris Lattnerf4ba08a2010-07-28 23:47:21 +00002438 assert(Lo == SSE && "Unexpected SSEUp classification");
Bruno Cardoso Lopes21a41bb2011-07-11 22:41:29 +00002439 ResType = GetByteVectorType(Ty);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002440 break;
2441 }
2442
Chris Lattnerbe5eb172010-09-01 00:24:35 +00002443 // If a high part was specified, merge it together with the low part. It is
2444 // known to pass in the high eightbyte of the result. We do this by forming a
2445 // first class struct aggregate with the high and low part: {low, high}
2446 if (HighPart)
Micah Villmowdd31ca12012-10-08 16:25:52 +00002447 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
Michael J. Spencerf5a1fbc2010-10-19 06:39:39 +00002448
Chris Lattner1f3a0632010-07-29 21:42:50 +00002449 return ABIArgInfo::getDirect(ResType);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002450}
2451
Chris Lattner22326a12010-07-29 02:31:05 +00002452void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002453
Chris Lattner458b2aa2010-07-29 02:16:43 +00002454 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002455
2456 // Keep track of the number of assigned registers.
Bill Wendling9987c0e2010-10-18 23:51:38 +00002457 unsigned freeIntRegs = 6, freeSSERegs = 8;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002458
2459 // If the return value is indirect, then the hidden argument is consuming one
2460 // integer register.
2461 if (FI.getReturnInfo().isIndirect())
2462 --freeIntRegs;
2463
Eli Friedman96fd2642013-06-12 00:13:45 +00002464 bool isVariadic = FI.isVariadic();
2465 unsigned numRequiredArgs = 0;
2466 if (isVariadic)
2467 numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs();
2468
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002469 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2470 // get assigned (in left-to-right order) for passing as follows...
2471 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2472 it != ie; ++it) {
Eli Friedman96fd2642013-06-12 00:13:45 +00002473 bool isNamedArg = true;
2474 if (isVariadic)
Aaron Ballman6a302642013-06-12 15:03:45 +00002475 isNamedArg = (it - FI.arg_begin()) <
2476 static_cast<signed>(numRequiredArgs);
Eli Friedman96fd2642013-06-12 00:13:45 +00002477
Bill Wendling9987c0e2010-10-18 23:51:38 +00002478 unsigned neededInt, neededSSE;
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00002479 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
Eli Friedman96fd2642013-06-12 00:13:45 +00002480 neededSSE, isNamedArg);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002481
2482 // AMD64-ABI 3.2.3p3: If there are no registers available for any
2483 // eightbyte of an argument, the whole argument is passed on the
2484 // stack. If registers have already been assigned for some
2485 // eightbytes of such an argument, the assignments get reverted.
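    // For example (illustrative): a struct of two doubles needs two SSE
    // registers; if only one is still free, the whole argument is passed in
    // memory below rather than being split between a register and the stack.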
Bill Wendling9987c0e2010-10-18 23:51:38 +00002486 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002487 freeIntRegs -= neededInt;
2488 freeSSERegs -= neededSSE;
2489 } else {
Daniel Dunbarf07b5ec2012-03-10 01:03:58 +00002490 it->info = getIndirectResult(it->type, freeIntRegs);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002491 }
2492 }
2493}
2494
2495static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
2496 QualType Ty,
2497 CodeGenFunction &CGF) {
2498 llvm::Value *overflow_arg_area_p =
2499 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2500 llvm::Value *overflow_arg_area =
2501 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2502
2503 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2504 // byte boundary if alignment needed by type exceeds 8 byte boundary.
Eli Friedmana1748562011-11-18 02:44:19 +00002505 // It isn't stated explicitly in the standard, but in practice we use
2506 // alignment greater than 16 where necessary.
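  // For example (illustrative): a 32-byte-aligned type such as __m256 rounds
  // overflow_arg_area up to a 32-byte boundary here.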
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002507 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
2508 if (Align > 8) {
Eli Friedmana1748562011-11-18 02:44:19 +00002509 // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
Owen Anderson41a75022009-08-13 21:57:51 +00002510 llvm::Value *Offset =
Eli Friedmana1748562011-11-18 02:44:19 +00002511 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002512 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
2513 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
Chris Lattner5e016ae2010-06-27 07:15:29 +00002514 CGF.Int64Ty);
Eli Friedmana1748562011-11-18 02:44:19 +00002515 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002516 overflow_arg_area =
2517 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
2518 overflow_arg_area->getType(),
2519 "overflow_arg_area.align");
2520 }
2521
2522 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
Chris Lattner2192fe52011-07-18 04:24:23 +00002523 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002524 llvm::Value *Res =
2525 CGF.Builder.CreateBitCast(overflow_arg_area,
Owen Anderson9793f0e2009-07-29 22:16:19 +00002526 llvm::PointerType::getUnqual(LTy));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002527
2528 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2529 // l->overflow_arg_area + sizeof(type).
2530 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2531 // an 8 byte boundary.
2532
2533 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
Owen Anderson41a75022009-08-13 21:57:51 +00002534 llvm::Value *Offset =
Chris Lattner5e016ae2010-06-27 07:15:29 +00002535 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002536 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2537 "overflow_arg_area.next");
2538 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2539
2540 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2541 return Res;
2542}
2543
2544llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2545 CodeGenFunction &CGF) const {
2546 // Assume that va_list type is correct; should be pointer to LLVM type:
2547 // struct {
2548 // i32 gp_offset;
2549 // i32 fp_offset;
2550 // i8* overflow_arg_area;
2551 // i8* reg_save_area;
2552 // };
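  // A sketch of the register save area layout assumed here (per the AMD64
  // ABI): %rdi, %rsi, %rdx, %rcx, %r8 and %r9 are spilled at byte offsets
  // 0..40, so gp_offset ranges over [0, 48]; %xmm0..%xmm7 occupy 16-byte
  // slots at offsets 48..160, so fp_offset ranges over [48, 176].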
Bill Wendling9987c0e2010-10-18 23:51:38 +00002553 unsigned neededInt, neededSSE;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002554
Chris Lattner9723d6c2010-03-11 18:19:55 +00002555 Ty = CGF.getContext().getCanonicalType(Ty);
Eli Friedman96fd2642013-06-12 00:13:45 +00002556 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
2557 /*isNamedArg*/false);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002558
2559 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2560 // in the registers. If not go to step 7.
2561 if (!neededInt && !neededSSE)
2562 return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2563
2564 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2565 // general purpose registers needed to pass type and num_fp to hold
2566 // the number of floating point registers needed.
2567
2568 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2569 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2570 // l->fp_offset > 304 - num_fp * 16 go to step 7.
2571 //
2572 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
2573 // register save space.
2574
2575 llvm::Value *InRegs = 0;
2576 llvm::Value *gp_offset_p = 0, *gp_offset = 0;
2577 llvm::Value *fp_offset_p = 0, *fp_offset = 0;
2578 if (neededInt) {
2579 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2580 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
Chris Lattnerd776fb12010-06-28 21:43:59 +00002581 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2582 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002583 }
2584
2585 if (neededSSE) {
2586 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2587 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2588 llvm::Value *FitsInFP =
Chris Lattnerd776fb12010-06-28 21:43:59 +00002589 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2590 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002591 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2592 }
2593
2594 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2595 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2596 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2597 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2598
2599 // Emit code to load the value if it was passed in registers.
2600
2601 CGF.EmitBlock(InRegBlock);
2602
2603 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2604 // an offset of l->gp_offset and/or l->fp_offset. This may require
2605 // copying to a temporary location in case the parameter is passed
2606 // in different register classes or requires an alignment greater
2607 // than 8 for general purpose registers and 16 for XMM registers.
2608 //
2609 // FIXME: This really results in shameful code when we end up needing to
2610 // collect arguments from different places; often what should result in a
2611 // simple assembling of a structure from scattered addresses has many more
2612 // loads than necessary. Can we clean this up?
Chris Lattner2192fe52011-07-18 04:24:23 +00002613 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002614 llvm::Value *RegAddr =
2615 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2616 "reg_save_area");
2617 if (neededInt && neededSSE) {
2618 // FIXME: Cleanup.
Chris Lattnerfe34c1d2010-07-29 06:26:06 +00002619 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
Chris Lattner2192fe52011-07-18 04:24:23 +00002620 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
Eli Friedmanc11c1692013-06-07 23:20:55 +00002621 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2622 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002623 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
Chris Lattner2192fe52011-07-18 04:24:23 +00002624 llvm::Type *TyLo = ST->getElementType(0);
2625 llvm::Type *TyHi = ST->getElementType(1);
Chris Lattner51e1cc22010-08-26 06:28:35 +00002626 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002627 "Unexpected ABI info for mixed regs");
Chris Lattner2192fe52011-07-18 04:24:23 +00002628 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2629 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002630 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2631 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
Duncan Sands998f9d92010-02-15 16:14:01 +00002632 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
2633 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002634 llvm::Value *V =
2635 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2636 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2637 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2638 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2639
Owen Anderson170229f2009-07-14 23:10:40 +00002640 RegAddr = CGF.Builder.CreateBitCast(Tmp,
Owen Anderson9793f0e2009-07-29 22:16:19 +00002641 llvm::PointerType::getUnqual(LTy));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002642 } else if (neededInt) {
2643 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2644 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
Owen Anderson9793f0e2009-07-29 22:16:19 +00002645 llvm::PointerType::getUnqual(LTy));
Eli Friedmanc11c1692013-06-07 23:20:55 +00002646
2647 // Copy to a temporary if necessary to ensure the appropriate alignment.
2648 std::pair<CharUnits, CharUnits> SizeAlign =
2649 CGF.getContext().getTypeInfoInChars(Ty);
2650 uint64_t TySize = SizeAlign.first.getQuantity();
2651 unsigned TyAlign = SizeAlign.second.getQuantity();
2652 if (TyAlign > 8) {
Eli Friedmanc11c1692013-06-07 23:20:55 +00002653 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2654 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
2655 RegAddr = Tmp;
2656 }
Chris Lattner0cf24192010-06-28 20:05:43 +00002657 } else if (neededSSE == 1) {
2658 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2659 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2660 llvm::PointerType::getUnqual(LTy));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002661 } else {
Chris Lattner0cf24192010-06-28 20:05:43 +00002662 assert(neededSSE == 2 && "Invalid number of needed registers!");
2663 // SSE registers are spaced 16 bytes apart in the register save
2664 // area, we need to collect the two eightbytes together.
2665 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
Chris Lattnerd776fb12010-06-28 21:43:59 +00002666 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
Chris Lattnerece04092012-02-07 00:39:47 +00002667 llvm::Type *DoubleTy = CGF.DoubleTy;
Chris Lattner2192fe52011-07-18 04:24:23 +00002668 llvm::Type *DblPtrTy =
Chris Lattner0cf24192010-06-28 20:05:43 +00002669 llvm::PointerType::getUnqual(DoubleTy);
Eli Friedmanc11c1692013-06-07 23:20:55 +00002670 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL);
2671 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
2672 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
Chris Lattner0cf24192010-06-28 20:05:43 +00002673 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2674 DblPtrTy));
2675 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2676 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2677 DblPtrTy));
2678 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2679 RegAddr = CGF.Builder.CreateBitCast(Tmp,
2680 llvm::PointerType::getUnqual(LTy));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002681 }
2682
2683 // AMD64-ABI 3.5.7p5: Step 5. Set:
2684 // l->gp_offset = l->gp_offset + num_gp * 8
2685 // l->fp_offset = l->fp_offset + num_fp * 16.
2686 if (neededInt) {
Chris Lattner5e016ae2010-06-27 07:15:29 +00002687 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002688 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2689 gp_offset_p);
2690 }
2691 if (neededSSE) {
Chris Lattner5e016ae2010-06-27 07:15:29 +00002692 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002693 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2694 fp_offset_p);
2695 }
2696 CGF.EmitBranch(ContBlock);
2697
2698 // Emit code to load the value if it was passed in memory.
2699
2700 CGF.EmitBlock(InMemBlock);
2701 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2702
2703 // Return the appropriate result.
2704
2705 CGF.EmitBlock(ContBlock);
Jay Foad20c0f022011-03-30 11:28:58 +00002706 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002707 "vaarg.addr");
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002708 ResAddr->addIncoming(RegAddr, InRegBlock);
2709 ResAddr->addIncoming(MemAddr, InMemBlock);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00002710 return ResAddr;
2711}
2712
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00002713ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00002714
2715 if (Ty->isVoidType())
2716 return ABIArgInfo::getIgnore();
2717
2718 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2719 Ty = EnumTy->getDecl()->getIntegerType();
2720
2721 uint64_t Size = getContext().getTypeSize(Ty);
2722
Reid Kleckner9005f412014-05-02 00:51:20 +00002723 const RecordType *RT = Ty->getAs<RecordType>();
2724 if (RT) {
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00002725 if (IsReturnType) {
Mark Lacey3825e832013-10-06 01:33:34 +00002726 if (isRecordReturnIndirect(RT, getCXXABI()))
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00002727 return ABIArgInfo::getIndirect(0, false);
2728 } else {
Mark Lacey3825e832013-10-06 01:33:34 +00002729 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00002730 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2731 }
2732
2733 if (RT->getDecl()->hasFlexibleArrayMember())
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00002734 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2735
NAKAMURA Takumif8a6e802011-02-22 03:56:57 +00002736 // FIXME: mingw-w64-gcc emits 128-bit struct as i128
Saleem Abdulrasool377066a2014-03-27 22:50:18 +00002737 if (Size == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
NAKAMURA Takumif8a6e802011-02-22 03:56:57 +00002738 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2739 Size));
Reid Kleckner9005f412014-05-02 00:51:20 +00002740 }
NAKAMURA Takumif8a6e802011-02-22 03:56:57 +00002741
Reid Klecknerec87fec2014-05-02 01:17:12 +00002742 if (Ty->isMemberPointerType()) {
Reid Kleckner7f5f0f32014-05-02 01:14:59 +00002743 // If the member pointer is represented by an LLVM int or ptr, pass it
2744 // directly.
2745 llvm::Type *LLTy = CGT.ConvertType(Ty);
2746 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
2747 return ABIArgInfo::getDirect();
Reid Kleckner9005f412014-05-02 00:51:20 +00002748 }
2749
2750 if (RT || Ty->isMemberPointerType()) {
NAKAMURA Takumif8a6e802011-02-22 03:56:57 +00002751 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2752 // not 1, 2, 4, or 8 bytes, must be passed by reference."
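    // For example (illustrative): a 3-byte or 16-byte struct is passed by
    // reference here (outside the MinGW i128 case above), while a 4- or
    // 8-byte struct falls through and is coerced to i32/i64 below.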
Reid Kleckner9005f412014-05-02 00:51:20 +00002753 if (Size > 64 || !llvm::isPowerOf2_64(Size))
2754 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00002755
Reid Kleckner9005f412014-05-02 00:51:20 +00002756 // Otherwise, coerce it to a small integer.
2757 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00002758 }
2759
2760 if (Ty->isPromotableIntegerType())
2761 return ABIArgInfo::getExtend();
2762
2763 return ABIArgInfo::getDirect();
2764}
2765
2766void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2767
2768 QualType RetTy = FI.getReturnType();
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00002769 FI.getReturnInfo() = classify(RetTy, true);
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00002770
Aaron Ballmanec47bc22014-03-17 18:10:01 +00002771 for (auto &I : FI.arguments())
2772 I.info = classify(I.type, false);
NAKAMURA Takumibd91f502011-01-17 22:56:31 +00002773}
2774
Chris Lattner04dc9572010-08-31 16:44:54 +00002775llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2776 CodeGenFunction &CGF) const {
Chris Lattnerece04092012-02-07 00:39:47 +00002777 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Chris Lattner0cf24192010-06-28 20:05:43 +00002778
Chris Lattner04dc9572010-08-31 16:44:54 +00002779 CGBuilderTy &Builder = CGF.Builder;
2780 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2781 "ap");
2782 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
2783 llvm::Type *PTy =
2784 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2785 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
2786
2787 uint64_t Offset =
2788 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
2789 llvm::Value *NextAddr =
2790 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
2791 "ap.next");
2792 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
2793
2794 return AddrTyped;
2795}
Chris Lattner0cf24192010-06-28 20:05:43 +00002796
Benjamin Kramer1cdb23d2012-10-20 13:02:06 +00002797namespace {
2798
Derek Schuffa2020962012-10-16 22:30:41 +00002799class NaClX86_64ABIInfo : public ABIInfo {
2800 public:
2801 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2802 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
Craig Topper4f12f102014-03-12 06:41:41 +00002803 void computeInfo(CGFunctionInfo &FI) const override;
2804 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2805 CodeGenFunction &CGF) const override;
Derek Schuffa2020962012-10-16 22:30:41 +00002806 private:
2807 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
2808 X86_64ABIInfo NInfo; // Used for everything else.
2809};
2810
2811class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2812 public:
2813 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
2814 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
2815};
2816
Benjamin Kramer1cdb23d2012-10-20 13:02:06 +00002817}
2818
Derek Schuffa2020962012-10-16 22:30:41 +00002819void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2820 if (FI.getASTCallingConvention() == CC_PnaclCall)
2821 PInfo.computeInfo(FI);
2822 else
2823 NInfo.computeInfo(FI);
2824}
2825
2826llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2827 CodeGenFunction &CGF) const {
2828 // Always use the native convention; calling pnacl-style varargs functions
2829 // is unsupported.
2830 return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
2831}
2832
2833
John McCallea8d8bb2010-03-11 00:10:12 +00002834// PowerPC-32
2835
2836namespace {
2837class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2838public:
Chris Lattner2b037972010-07-29 02:01:43 +00002839 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002840
Craig Topper4f12f102014-03-12 06:41:41 +00002841 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
John McCallea8d8bb2010-03-11 00:10:12 +00002842 // This is recovered from gcc output.
2843 return 1; // r1 is the dedicated stack pointer
2844 }
2845
2846 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00002847 llvm::Value *Address) const override;
John McCallea8d8bb2010-03-11 00:10:12 +00002848};
2849
2850}
2851
2852bool
2853PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2854 llvm::Value *Address) const {
2855 // This is calculated from the LLVM and GCC tables and verified
2856 // against gcc output. AFAIK all ABIs use the same encoding.
2857
2858 CodeGen::CGBuilderTy &Builder = CGF.Builder;
John McCallea8d8bb2010-03-11 00:10:12 +00002859
Chris Lattnerece04092012-02-07 00:39:47 +00002860 llvm::IntegerType *i8 = CGF.Int8Ty;
John McCallea8d8bb2010-03-11 00:10:12 +00002861 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2862 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2863 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2864
2865 // 0-31: r0-31, the 4-byte general-purpose registers
John McCall943fae92010-05-27 06:19:26 +00002866 AssignToArrayRange(Builder, Address, Four8, 0, 31);
John McCallea8d8bb2010-03-11 00:10:12 +00002867
2868 // 32-63: fp0-31, the 8-byte floating-point registers
John McCall943fae92010-05-27 06:19:26 +00002869 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
John McCallea8d8bb2010-03-11 00:10:12 +00002870
2871 // 64-76 are various 4-byte special-purpose registers:
2872 // 64: mq
2873 // 65: lr
2874 // 66: ctr
2875 // 67: ap
2876 // 68-75 cr0-7
2877 // 76: xer
John McCall943fae92010-05-27 06:19:26 +00002878 AssignToArrayRange(Builder, Address, Four8, 64, 76);
John McCallea8d8bb2010-03-11 00:10:12 +00002879
2880 // 77-108: v0-31, the 16-byte vector registers
John McCall943fae92010-05-27 06:19:26 +00002881 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
John McCallea8d8bb2010-03-11 00:10:12 +00002882
2883 // 109: vrsave
2884 // 110: vscr
2885 // 111: spe_acc
2886 // 112: spefscr
2887 // 113: sfp
John McCall943fae92010-05-27 06:19:26 +00002888 AssignToArrayRange(Builder, Address, Four8, 109, 113);
John McCallea8d8bb2010-03-11 00:10:12 +00002889
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00002890 return false;
John McCallea8d8bb2010-03-11 00:10:12 +00002891}
2892
Roman Divackyd966e722012-05-09 18:22:46 +00002893// PowerPC-64
2894
2895namespace {
Bill Schmidt25cb3492012-10-03 19:18:57 +00002896/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
2897class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
2898
2899public:
2900 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
2901
Ulrich Weigand77ed89d2012-11-05 19:13:42 +00002902 bool isPromotableTypeForABI(QualType Ty) const;
2903
2904 ABIArgInfo classifyReturnType(QualType RetTy) const;
2905 ABIArgInfo classifyArgumentType(QualType Ty) const;
2906
Bill Schmidt84d37792012-10-12 19:26:17 +00002907 // TODO: We can add more logic to computeInfo to improve performance.
2908 // Example: For aggregate arguments that fit in a register, we could
2909 // use getDirectInReg (as is done below for structs containing a single
2910 // floating-point value) to avoid pushing them to memory on function
2911 // entry. This would require changing the logic in PPCISelLowering
2912 // when lowering the parameters in the caller and args in the callee.
Craig Topper4f12f102014-03-12 06:41:41 +00002913 void computeInfo(CGFunctionInfo &FI) const override {
Bill Schmidt84d37792012-10-12 19:26:17 +00002914 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
Aaron Ballmanec47bc22014-03-17 18:10:01 +00002915 for (auto &I : FI.arguments()) {
Bill Schmidt84d37792012-10-12 19:26:17 +00002916 // We rely on the default argument classification for the most part.
2917 // One exception: An aggregate containing a single floating-point
Bill Schmidt179afae2013-07-23 22:15:57 +00002918 // or vector item must be passed in a register if one is available.
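      // For example (illustrative): 'struct { double d; }' gets the
      // getDirectInReg treatment below, so it lands in a floating-point
      // register instead of being passed indirectly.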
Aaron Ballmanec47bc22014-03-17 18:10:01 +00002919 const Type *T = isSingleElementStruct(I.type, getContext());
Bill Schmidt84d37792012-10-12 19:26:17 +00002920 if (T) {
2921 const BuiltinType *BT = T->getAs<BuiltinType>();
Bill Schmidt179afae2013-07-23 22:15:57 +00002922 if (T->isVectorType() || (BT && BT->isFloatingPoint())) {
Bill Schmidt84d37792012-10-12 19:26:17 +00002923 QualType QT(T, 0);
Aaron Ballmanec47bc22014-03-17 18:10:01 +00002924 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
Bill Schmidt84d37792012-10-12 19:26:17 +00002925 continue;
2926 }
2927 }
Aaron Ballmanec47bc22014-03-17 18:10:01 +00002928 I.info = classifyArgumentType(I.type);
Bill Schmidt84d37792012-10-12 19:26:17 +00002929 }
2930 }
Bill Schmidt25cb3492012-10-03 19:18:57 +00002931
Craig Topper4f12f102014-03-12 06:41:41 +00002932 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2933 CodeGenFunction &CGF) const override;
Bill Schmidt25cb3492012-10-03 19:18:57 +00002934};
2935
2936class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
2937public:
2938 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT)
2939 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT)) {}
2940
Craig Topper4f12f102014-03-12 06:41:41 +00002941 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
Bill Schmidt25cb3492012-10-03 19:18:57 +00002942 // This is recovered from gcc output.
2943 return 1; // r1 is the dedicated stack pointer
2944 }
2945
2946 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00002947 llvm::Value *Address) const override;
Bill Schmidt25cb3492012-10-03 19:18:57 +00002948};
2949
Roman Divackyd966e722012-05-09 18:22:46 +00002950class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2951public:
2952 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2953
Craig Topper4f12f102014-03-12 06:41:41 +00002954 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
Roman Divackyd966e722012-05-09 18:22:46 +00002955 // This is recovered from gcc output.
2956 return 1; // r1 is the dedicated stack pointer
2957 }
2958
2959 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00002960 llvm::Value *Address) const override;
Roman Divackyd966e722012-05-09 18:22:46 +00002961};
2962
2963}
2964
Ulrich Weigand77ed89d2012-11-05 19:13:42 +00002965// Return true if the ABI requires Ty to be passed sign- or zero-
2966// extended to 64 bits.
2967bool
2968PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
2969 // Treat an enum type as its underlying type.
2970 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2971 Ty = EnumTy->getDecl()->getIntegerType();
2972
2973 // Promotable integer types are required to be promoted by the ABI.
2974 if (Ty->isPromotableIntegerType())
2975 return true;
2976
2977 // In addition to the usual promotable integer types, we also need to
2978 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
2979 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2980 switch (BT->getKind()) {
2981 case BuiltinType::Int:
2982 case BuiltinType::UInt:
2983 return true;
2984 default:
2985 break;
2986 }
2987
2988 return false;
2989}
2990
2991ABIArgInfo
2992PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
Bill Schmidt90b22c92012-11-27 02:46:43 +00002993 if (Ty->isAnyComplexType())
2994 return ABIArgInfo::getDirect();
2995
Ulrich Weigand77ed89d2012-11-05 19:13:42 +00002996 if (isAggregateTypeForABI(Ty)) {
Mark Lacey3825e832013-10-06 01:33:34 +00002997 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00002998 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Ulrich Weigand77ed89d2012-11-05 19:13:42 +00002999
3000 return ABIArgInfo::getIndirect(0);
3001 }
3002
3003 return (isPromotableTypeForABI(Ty) ?
3004 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3005}
3006
3007ABIArgInfo
3008PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
3009 if (RetTy->isVoidType())
3010 return ABIArgInfo::getIgnore();
3011
Bill Schmidta3d121c2012-12-17 04:20:17 +00003012 if (RetTy->isAnyComplexType())
3013 return ABIArgInfo::getDirect();
3014
Ulrich Weigand77ed89d2012-11-05 19:13:42 +00003015 if (isAggregateTypeForABI(RetTy))
3016 return ABIArgInfo::getIndirect(0);
3017
3018 return (isPromotableTypeForABI(RetTy) ?
3019 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3020}
3021
Bill Schmidt25cb3492012-10-03 19:18:57 +00003022// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
3023llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
3024 QualType Ty,
3025 CodeGenFunction &CGF) const {
3026 llvm::Type *BP = CGF.Int8PtrTy;
3027 llvm::Type *BPP = CGF.Int8PtrPtrTy;
3028
3029 CGBuilderTy &Builder = CGF.Builder;
3030 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3031 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3032
Bill Schmidt924c4782013-01-14 17:45:36 +00003033 // Update the va_list pointer. The pointer should be bumped by the
3034 // size of the object. We can trust getTypeSize() except for a complex
3035 // type whose base type is smaller than a doubleword. For these, the
3036 // size of the object is 16 bytes; see below for further explanation.
Bill Schmidt25cb3492012-10-03 19:18:57 +00003037 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
Bill Schmidt924c4782013-01-14 17:45:36 +00003038 QualType BaseTy;
3039 unsigned CplxBaseSize = 0;
3040
3041 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3042 BaseTy = CTy->getElementType();
3043 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
3044 if (CplxBaseSize < 8)
3045 SizeInBytes = 16;
3046 }
3047
Bill Schmidt25cb3492012-10-03 19:18:57 +00003048 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
3049 llvm::Value *NextAddr =
3050 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
3051 "ap.next");
3052 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3053
Bill Schmidt924c4782013-01-14 17:45:36 +00003054 // If we have a complex type and the base type is smaller than 8 bytes,
3055 // the ABI calls for the real and imaginary parts to be right-adjusted
3056 // in separate doublewords. However, Clang expects us to produce a
3057 // pointer to a structure with the two parts packed tightly. So generate
3058 // loads of the real and imaginary parts relative to the va_list pointer,
3059 // and store them to a temporary structure.
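  // For example (illustrative): for '_Complex float' (CplxBaseSize == 4) the
  // real part is loaded from Addr+4 and the imaginary part from Addr+12, and
  // the two are then stored adjacently in the temporary.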
3060 if (CplxBaseSize && CplxBaseSize < 8) {
3061 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3062 llvm::Value *ImagAddr = RealAddr;
3063 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
3064 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
3065 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
3066 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
3067 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
3068 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
3069 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
3070 llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
3071 "vacplx");
3072 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
3073 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
3074 Builder.CreateStore(Real, RealPtr, false);
3075 Builder.CreateStore(Imag, ImagPtr, false);
3076 return Ptr;
3077 }
3078
Bill Schmidt25cb3492012-10-03 19:18:57 +00003079 // If the argument is smaller than 8 bytes, it is right-adjusted in
3080 // its doubleword slot. Adjust the pointer to pick it up from the
3081 // correct offset.
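  // For example (illustrative): a 4-byte 'int' sits at offset 4 within its
  // big-endian doubleword slot, so the pointer is advanced by 8 - 4 = 4 bytes.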
3082 if (SizeInBytes < 8) {
3083 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3084 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
3085 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
3086 }
3087
3088 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3089 return Builder.CreateBitCast(Addr, PTy);
3090}
3091
3092static bool
3093PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3094 llvm::Value *Address) {
Roman Divackyd966e722012-05-09 18:22:46 +00003095 // This is calculated from the LLVM and GCC tables and verified
3096 // against gcc output. AFAIK all ABIs use the same encoding.
3097
3098 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3099
3100 llvm::IntegerType *i8 = CGF.Int8Ty;
3101 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3102 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3103 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3104
3105 // 0-31: r0-31, the 8-byte general-purpose registers
3106 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
3107
3108 // 32-63: fp0-31, the 8-byte floating-point registers
3109 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3110
3111 // 64-76 are various 4-byte special-purpose registers:
3112 // 64: mq
3113 // 65: lr
3114 // 66: ctr
3115 // 67: ap
3116 // 68-75 cr0-7
3117 // 76: xer
3118 AssignToArrayRange(Builder, Address, Four8, 64, 76);
3119
3120 // 77-108: v0-31, the 16-byte vector registers
3121 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3122
3123 // 109: vrsave
3124 // 110: vscr
3125 // 111: spe_acc
3126 // 112: spefscr
3127 // 113: sfp
3128 AssignToArrayRange(Builder, Address, Four8, 109, 113);
3129
3130 return false;
3131}
John McCallea8d8bb2010-03-11 00:10:12 +00003132
Bill Schmidt25cb3492012-10-03 19:18:57 +00003133bool
3134PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3135 CodeGen::CodeGenFunction &CGF,
3136 llvm::Value *Address) const {
3137
3138 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3139}
3140
3141bool
3142PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3143 llvm::Value *Address) const {
3144
3145 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3146}
3147
Chris Lattner0cf24192010-06-28 20:05:43 +00003148//===----------------------------------------------------------------------===//
Tim Northovera2ee4332014-03-29 15:09:45 +00003149// ARM64 ABI Implementation
3150//===----------------------------------------------------------------------===//
3151
3152namespace {
3153
3154class ARM64ABIInfo : public ABIInfo {
3155public:
3156 enum ABIKind {
3157 AAPCS = 0,
3158 DarwinPCS
3159 };
3160
3161private:
3162 ABIKind Kind;
3163
3164public:
3165 ARM64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
3166
3167private:
3168 ABIKind getABIKind() const { return Kind; }
3169 bool isDarwinPCS() const { return Kind == DarwinPCS; }
3170
3171 ABIArgInfo classifyReturnType(QualType RetTy) const;
3172 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &AllocatedVFP,
3173 bool &IsHA, unsigned &AllocatedGPR,
Bob Wilson373af732014-04-21 01:23:39 +00003174 bool &IsSmallAggr, bool IsNamedArg) const;
Tim Northovera2ee4332014-03-29 15:09:45 +00003175 bool isIllegalVectorType(QualType Ty) const;
3176
3177 virtual void computeInfo(CGFunctionInfo &FI) const {
3178    // To correctly handle Homogeneous Aggregates, we need to keep track of the
3179 // number of SIMD and Floating-point registers allocated so far.
3180 // If the argument is an HFA or an HVA and there are sufficient unallocated
3181 // SIMD and Floating-point registers, then the argument is allocated to SIMD
3182 // and Floating-point Registers (with one register per member of the HFA or
3183 // HVA). Otherwise, the NSRN is set to 8.
3184 unsigned AllocatedVFP = 0;
Bob Wilson373af732014-04-21 01:23:39 +00003185
Tim Northovera2ee4332014-03-29 15:09:45 +00003186 // To correctly handle small aggregates, we need to keep track of the number
3187 // of GPRs allocated so far. If the small aggregate can't all fit into
3188    // registers, it will be passed on the stack. We don't allow the aggregate to be
3189 // partially in registers.
3190 unsigned AllocatedGPR = 0;
Bob Wilson373af732014-04-21 01:23:39 +00003191
3192 // Find the number of named arguments. Variadic arguments get special
3193 // treatment with the Darwin ABI.
3194 unsigned NumRequiredArgs = (FI.isVariadic() ?
3195 FI.getRequiredArgs().getNumRequiredArgs() :
3196 FI.arg_size());
3197
Tim Northovera2ee4332014-03-29 15:09:45 +00003198 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3199 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3200 it != ie; ++it) {
3201 unsigned PreAllocation = AllocatedVFP, PreGPR = AllocatedGPR;
3202 bool IsHA = false, IsSmallAggr = false;
3203 const unsigned NumVFPs = 8;
3204 const unsigned NumGPRs = 8;
Bob Wilson373af732014-04-21 01:23:39 +00003205 bool IsNamedArg = ((it - FI.arg_begin()) <
3206 static_cast<signed>(NumRequiredArgs));
Tim Northovera2ee4332014-03-29 15:09:45 +00003207 it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
Bob Wilson373af732014-04-21 01:23:39 +00003208 AllocatedGPR, IsSmallAggr, IsNamedArg);
Tim Northover5ffc0922014-04-17 10:20:38 +00003209
3210 // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
3211 // as sequences of floats since they'll get "holes" inserted as
3212 // padding by the back end.
Tim Northover07f16242014-04-18 10:47:44 +00003213 if (IsHA && AllocatedVFP > NumVFPs && !isDarwinPCS() &&
3214 getContext().getTypeAlign(it->type) < 64) {
3215 uint32_t NumStackSlots = getContext().getTypeSize(it->type);
3216 NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
Tim Northover5ffc0922014-04-17 10:20:38 +00003217
Tim Northover07f16242014-04-18 10:47:44 +00003218 llvm::Type *CoerceTy = llvm::ArrayType::get(
3219 llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
3220 it->info = ABIArgInfo::getDirect(CoerceTy);
Tim Northover5ffc0922014-04-17 10:20:38 +00003221 }
3222
Tim Northovera2ee4332014-03-29 15:09:45 +00003223 // If we do not have enough VFP registers for the HA, any VFP registers
3224 // that are unallocated are marked as unavailable. To achieve this, we add
3225 // padding of (NumVFPs - PreAllocation) floats.
3226 if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
3227 llvm::Type *PaddingTy = llvm::ArrayType::get(
3228 llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
Tim Northover5ffc0922014-04-17 10:20:38 +00003229 it->info.setPaddingType(PaddingTy);
Tim Northovera2ee4332014-03-29 15:09:45 +00003230 }
Tim Northover5ffc0922014-04-17 10:20:38 +00003231
Tim Northovera2ee4332014-03-29 15:09:45 +00003232 // If we do not have enough GPRs for the small aggregate, any GPR regs
3233 // that are unallocated are marked as unavailable.
3234 if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
3235 llvm::Type *PaddingTy = llvm::ArrayType::get(
3236 llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreGPR);
3237 it->info =
3238 ABIArgInfo::getDirect(it->info.getCoerceToType(), 0, PaddingTy);
3239 }
3240 }
3241 }
3242
3243 llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3244 CodeGenFunction &CGF) const;
3245
3246 llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3247 CodeGenFunction &CGF) const;
3248
3249 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3250 CodeGenFunction &CGF) const {
3251 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
3252 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
3253 }
3254};
3255
3256class ARM64TargetCodeGenInfo : public TargetCodeGenInfo {
3257public:
3258 ARM64TargetCodeGenInfo(CodeGenTypes &CGT, ARM64ABIInfo::ABIKind Kind)
3259 : TargetCodeGenInfo(new ARM64ABIInfo(CGT, Kind)) {}
3260
3261 StringRef getARCRetainAutoreleasedReturnValueMarker() const {
3262 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
3263 }
3264
3265 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; }
3266
3267 virtual bool doesReturnSlotInterfereWithArgs() const { return false; }
3268};
3269}
3270
3271static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
3272 ASTContext &Context,
3273 uint64_t *HAMembers = 0);
3274
3275ABIArgInfo ARM64ABIInfo::classifyArgumentType(QualType Ty,
3276 unsigned &AllocatedVFP,
3277 bool &IsHA,
3278 unsigned &AllocatedGPR,
Bob Wilson373af732014-04-21 01:23:39 +00003279 bool &IsSmallAggr,
3280 bool IsNamedArg) const {
Tim Northovera2ee4332014-03-29 15:09:45 +00003281 // Handle illegal vector types here.
3282 if (isIllegalVectorType(Ty)) {
3283 uint64_t Size = getContext().getTypeSize(Ty);
3284 if (Size <= 32) {
3285 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
3286 AllocatedGPR++;
3287 return ABIArgInfo::getDirect(ResType);
3288 }
3289 if (Size == 64) {
3290 llvm::Type *ResType =
3291 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
3292 AllocatedVFP++;
3293 return ABIArgInfo::getDirect(ResType);
3294 }
3295 if (Size == 128) {
3296 llvm::Type *ResType =
3297 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
3298 AllocatedVFP++;
3299 return ABIArgInfo::getDirect(ResType);
3300 }
3301 AllocatedGPR++;
3302 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3303 }
3304 if (Ty->isVectorType())
3305    // Size of a legal vector should be either 64 or 128 bits.
3306 AllocatedVFP++;
3307 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3308 if (BT->getKind() == BuiltinType::Half ||
3309 BT->getKind() == BuiltinType::Float ||
3310 BT->getKind() == BuiltinType::Double ||
3311 BT->getKind() == BuiltinType::LongDouble)
3312 AllocatedVFP++;
3313 }
3314
3315 if (!isAggregateTypeForABI(Ty)) {
3316 // Treat an enum type as its underlying type.
3317 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3318 Ty = EnumTy->getDecl()->getIntegerType();
3319
3320 if (!Ty->isFloatingType() && !Ty->isVectorType()) {
Tim Northoverc801b4a2014-04-15 14:55:11 +00003321 unsigned Alignment = getContext().getTypeAlign(Ty);
3322 if (!isDarwinPCS() && Alignment > 64)
3323 AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);
3324
Tim Northovera2ee4332014-03-29 15:09:45 +00003325 int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
3326 AllocatedGPR += RegsNeeded;
3327 }
3328 return (Ty->isPromotableIntegerType() && isDarwinPCS()
3329 ? ABIArgInfo::getExtend()
3330 : ABIArgInfo::getDirect());
3331 }
3332
3333 // Structures with either a non-trivial destructor or a non-trivial
3334 // copy constructor are always indirect.
3335 if (isRecordReturnIndirect(Ty, getCXXABI())) {
3336 AllocatedGPR++;
3337 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3338 }
3339
3340  // Empty records are always ignored on Darwin, but are passed (as a dummy i8)
3341  // in C++ mode elsewhere for GNU compatibility.
3342 if (isEmptyRecord(getContext(), Ty, true)) {
3343 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
3344 return ABIArgInfo::getIgnore();
3345
3346 ++AllocatedGPR;
3347 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
3348 }
3349
3350 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
3351 const Type *Base = 0;
3352 uint64_t Members = 0;
3353 if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
Tim Northovera2ee4332014-03-29 15:09:45 +00003354 IsHA = true;
Bob Wilson373af732014-04-21 01:23:39 +00003355 if (!IsNamedArg && isDarwinPCS()) {
3356 // With the Darwin ABI, variadic arguments are always passed on the stack
3357 // and should not be expanded. Treat variadic HFAs as arrays of doubles.
3358 uint64_t Size = getContext().getTypeSize(Ty);
3359 llvm::Type *BaseTy = llvm::Type::getDoubleTy(getVMContext());
3360 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
3361 }
3362 AllocatedVFP += Members;
Tim Northovera2ee4332014-03-29 15:09:45 +00003363 return ABIArgInfo::getExpand();
3364 }
3365
3366 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
3367 uint64_t Size = getContext().getTypeSize(Ty);
3368 if (Size <= 128) {
Tim Northoverc801b4a2014-04-15 14:55:11 +00003369 unsigned Alignment = getContext().getTypeAlign(Ty);
3370 if (!isDarwinPCS() && Alignment > 64)
3371 AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);
3372
Tim Northovera2ee4332014-03-29 15:09:45 +00003373 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
3374 AllocatedGPR += Size / 64;
3375 IsSmallAggr = true;
3376    // We use a pair of i64 for a 16-byte aggregate with 8-byte alignment.
3377 // For aggregates with 16-byte alignment, we use i128.
Tim Northoverc801b4a2014-04-15 14:55:11 +00003378 if (Alignment < 128 && Size == 128) {
Tim Northovera2ee4332014-03-29 15:09:45 +00003379 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
3380 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
3381 }
3382 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
3383 }
3384
3385 AllocatedGPR++;
3386 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3387}
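// Illustrative note (not from the original file): a rough sketch of how the
// rules above classify a few hypothetical C types under this ABI:
//
//   struct HFA  { float x, y, z, w; };  // HFA of four floats: expanded,
//                                       // consuming 4 FP/SIMD registers (or
//                                       // coerced to [2 x double] when passed
//                                       // variadically with the Darwin ABI).
//   struct P2   { long a, b; };         // 16 bytes, 8-byte aligned: coerced
//                                       // to [2 x i64], using two GPRs.
//   struct Big  { long a, b, c; };      // 24 bytes, not an HFA: passed
//                                       // indirectly via a pointer in one GPR.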
3388
3389ABIArgInfo ARM64ABIInfo::classifyReturnType(QualType RetTy) const {
3390 if (RetTy->isVoidType())
3391 return ABIArgInfo::getIgnore();
3392
3393 // Large vector types should be returned via memory.
3394 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
3395 return ABIArgInfo::getIndirect(0);
3396
3397 if (!isAggregateTypeForABI(RetTy)) {
3398 // Treat an enum type as its underlying type.
3399 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3400 RetTy = EnumTy->getDecl()->getIntegerType();
3401
Tim Northover4dab6982014-04-18 13:46:08 +00003402 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
3403 ? ABIArgInfo::getExtend()
3404 : ABIArgInfo::getDirect());
Tim Northovera2ee4332014-03-29 15:09:45 +00003405 }
3406
3407 // Structures with either a non-trivial destructor or a non-trivial
3408 // copy constructor are always indirect.
3409 if (isRecordReturnIndirect(RetTy, getCXXABI()))
3410 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3411
3412 if (isEmptyRecord(getContext(), RetTy, true))
3413 return ABIArgInfo::getIgnore();
3414
3415 const Type *Base = 0;
3416 if (isHomogeneousAggregate(RetTy, Base, getContext()))
3417 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
3418 return ABIArgInfo::getDirect();
3419
3420 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
3421 uint64_t Size = getContext().getTypeSize(RetTy);
3422 if (Size <= 128) {
3423 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
3424 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
3425 }
3426
3427 return ABIArgInfo::getIndirect(0);
3428}
3429
3430/// isIllegalVectorType - check whether the vector type is legal for ARM64.
3431bool ARM64ABIInfo::isIllegalVectorType(QualType Ty) const {
3432 if (const VectorType *VT = Ty->getAs<VectorType>()) {
3433 // Check whether VT is legal.
3434 unsigned NumElements = VT->getNumElements();
3435 uint64_t Size = getContext().getTypeSize(VT);
3436    // NumElements should be a power of 2 between 1 and 16.
3437 if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
3438 return true;
3439 return Size != 64 && (Size != 128 || NumElements == 1);
3440 }
3441 return false;
3442}
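// Illustrative note (not from the original file): under the check above a
// hypothetical 64-bit vector such as 'int __attribute__((vector_size(8)))' is
// legal, while a 256-bit 'int __attribute__((vector_size(32)))' is illegal
// and, per classifyArgumentType above, ends up being passed indirectly.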
3443
3444static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
3445 int AllocatedGPR, int AllocatedVFP,
3446 bool IsIndirect, CodeGenFunction &CGF) {
3447 // The AArch64 va_list type and handling is specified in the Procedure Call
3448 // Standard, section B.4:
3449 //
3450 // struct {
3451 // void *__stack;
3452 // void *__gr_top;
3453 // void *__vr_top;
3454 // int __gr_offs;
3455 // int __vr_offs;
3456 // };
3457
3458 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
3459 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3460 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
3461 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3462 auto &Ctx = CGF.getContext();
3463
3464 llvm::Value *reg_offs_p = 0, *reg_offs = 0;
3465 int reg_top_index;
3466 int RegSize;
3467 if (AllocatedGPR) {
3468 assert(!AllocatedVFP && "Arguments never split between int & VFP regs");
3469 // 3 is the field number of __gr_offs
3470 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
3471 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
3472 reg_top_index = 1; // field number for __gr_top
3473 RegSize = 8 * AllocatedGPR;
3474 } else {
3475 assert(!AllocatedGPR && "Argument must go in VFP or int regs");
3476 // 4 is the field number of __vr_offs.
3477 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
3478 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
3479 reg_top_index = 2; // field number for __vr_top
3480 RegSize = 16 * AllocatedVFP;
3481 }
3482
3483 //=======================================
3484 // Find out where argument was passed
3485 //=======================================
3486
3487 // If reg_offs >= 0 we're already using the stack for this type of
3488 // argument. We don't want to keep updating reg_offs (in case it overflows,
3489 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
3490 // whatever they get).
3491 llvm::Value *UsingStack = 0;
3492 UsingStack = CGF.Builder.CreateICmpSGE(
3493 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
3494
3495 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
3496
3497 // Otherwise, at least some kind of argument could go in these registers, the
Bob Wilson3abf1692014-04-21 01:23:36 +00003498 // question is whether this particular type is too big.
Tim Northovera2ee4332014-03-29 15:09:45 +00003499 CGF.EmitBlock(MaybeRegBlock);
3500
3501  // Integer arguments may need their register alignment corrected (for example a
3502 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
3503 // align __gr_offs to calculate the potential address.
3504 if (AllocatedGPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
3505 int Align = Ctx.getTypeAlign(Ty) / 8;
3506
3507 reg_offs = CGF.Builder.CreateAdd(
3508 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
3509 "align_regoffs");
3510 reg_offs = CGF.Builder.CreateAnd(
3511 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
3512 "aligned_regoffs");
3513 }
3514
3515 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
3516 llvm::Value *NewOffset = 0;
3517 NewOffset = CGF.Builder.CreateAdd(
3518 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
3519 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
3520
3521 // Now we're in a position to decide whether this argument really was in
3522 // registers or not.
3523 llvm::Value *InRegs = 0;
3524 InRegs = CGF.Builder.CreateICmpSLE(
3525 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
3526
3527 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
3528
3529 //=======================================
3530 // Argument was in registers
3531 //=======================================
3532
3533 // Now we emit the code for if the argument was originally passed in
3534 // registers. First start the appropriate block:
3535 CGF.EmitBlock(InRegBlock);
3536
3537 llvm::Value *reg_top_p = 0, *reg_top = 0;
3538 reg_top_p =
3539 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
3540 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
3541 llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
3542 llvm::Value *RegAddr = 0;
3543 llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
3544
3545 if (IsIndirect) {
3546    // If the argument has been passed indirectly (as large structs are), whatever
3547    // we find in saved registers or on the stack is really a struct **.
3548 MemTy = llvm::PointerType::getUnqual(MemTy);
3549 }
3550
3551 const Type *Base = 0;
3552 uint64_t NumMembers;
James Molloy467be602014-05-07 14:45:55 +00003553 bool IsHFA = isHomogeneousAggregate(Ty, Base, Ctx, &NumMembers);
3554 if (IsHFA && NumMembers > 1) {
Tim Northovera2ee4332014-03-29 15:09:45 +00003555 // Homogeneous aggregates passed in registers will have their elements split
3556    // and stored 16 bytes apart regardless of size (they're notionally in qN,
3557 // qN+1, ...). We reload and store into a temporary local variable
3558 // contiguously.
3559 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
3560 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
3561 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
3562 llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
3563 int Offset = 0;
3564
3565 if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
3566 Offset = 16 - Ctx.getTypeSize(Base) / 8;
3567 for (unsigned i = 0; i < NumMembers; ++i) {
3568 llvm::Value *BaseOffset =
3569 llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
3570 llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
3571 LoadAddr = CGF.Builder.CreateBitCast(
3572 LoadAddr, llvm::PointerType::getUnqual(BaseTy));
3573 llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);
3574
3575 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
3576 CGF.Builder.CreateStore(Elem, StoreAddr);
3577 }
3578
3579 RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
3580 } else {
3581 // Otherwise the object is contiguous in memory
3582 unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
James Molloy467be602014-05-07 14:45:55 +00003583 if (CGF.CGM.getDataLayout().isBigEndian() &&
3584 (IsHFA || !isAggregateTypeForABI(Ty)) &&
Tim Northovera2ee4332014-03-29 15:09:45 +00003585 Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
3586 int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
3587 BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
3588
3589 BaseAddr = CGF.Builder.CreateAdd(
3590 BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
3591
3592 BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
3593 }
3594
3595 RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
3596 }
3597
3598 CGF.EmitBranch(ContBlock);
3599
3600 //=======================================
3601 // Argument was on the stack
3602 //=======================================
3603 CGF.EmitBlock(OnStackBlock);
3604
3605 llvm::Value *stack_p = 0, *OnStackAddr = 0;
3606 stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
3607 OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
3608
3609  // Again, stack arguments may need realignment. In this case both integer and
3610 // floating-point ones might be affected.
3611 if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
3612 int Align = Ctx.getTypeAlign(Ty) / 8;
3613
3614 OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
3615
3616 OnStackAddr = CGF.Builder.CreateAdd(
3617 OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
3618 "align_stack");
3619 OnStackAddr = CGF.Builder.CreateAnd(
3620 OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
3621 "align_stack");
3622
3623 OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
3624 }
3625
3626 uint64_t StackSize;
3627 if (IsIndirect)
3628 StackSize = 8;
3629 else
3630 StackSize = Ctx.getTypeSize(Ty) / 8;
3631
3632 // All stack slots are 8 bytes
3633 StackSize = llvm::RoundUpToAlignment(StackSize, 8);
3634
3635 llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
3636 llvm::Value *NewStack =
3637 CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
3638
3639 // Write the new value of __stack for the next call to va_arg
3640 CGF.Builder.CreateStore(NewStack, stack_p);
3641
3642 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
3643 Ctx.getTypeSize(Ty) < 64) {
3644 int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
3645 OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
3646
3647 OnStackAddr = CGF.Builder.CreateAdd(
3648 OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
3649
3650 OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
3651 }
3652
3653 OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
3654
3655 CGF.EmitBranch(ContBlock);
3656
3657 //=======================================
3658 // Tidy up
3659 //=======================================
3660 CGF.EmitBlock(ContBlock);
3661
3662 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
3663 ResAddr->addIncoming(RegAddr, InRegBlock);
3664 ResAddr->addIncoming(OnStackAddr, OnStackBlock);
3665
3666 if (IsIndirect)
3667 return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
3668
3669 return ResAddr;
3670}
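// Illustrative note (not from the original file): a sketch of the register
// check emitted above for a hypothetical integer va_arg.  If __gr_offs is -48
// on entry, it is advanced by RegSize (8) to -40; since -40 <= 0 the argument
// is loaded from __gr_top - 48, otherwise it would have been read from
// __stack instead.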
3671
3672llvm::Value *ARM64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3673 CodeGenFunction &CGF) const {
3674
3675 unsigned AllocatedGPR = 0, AllocatedVFP = 0;
3676 bool IsHA = false, IsSmallAggr = false;
Bob Wilson373af732014-04-21 01:23:39 +00003677 ABIArgInfo AI = classifyArgumentType(Ty, AllocatedVFP, IsHA, AllocatedGPR,
3678 IsSmallAggr, false /*IsNamedArg*/);
Tim Northovera2ee4332014-03-29 15:09:45 +00003679
3680 return EmitAArch64VAArg(VAListAddr, Ty, AllocatedGPR, AllocatedVFP,
3681 AI.isIndirect(), CGF);
3682}
3683
3684llvm::Value *ARM64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3685 CodeGenFunction &CGF) const {
3686 // We do not support va_arg for aggregates or illegal vector types.
3687 // Lower VAArg here for these cases and use the LLVM va_arg instruction for
3688 // other cases.
3689 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
3690 return 0;
3691
3692 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
3693 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
3694
3695 const Type *Base = 0;
3696 bool isHA = isHomogeneousAggregate(Ty, Base, getContext());
3697
3698 bool isIndirect = false;
3699 // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
3700 // be passed indirectly.
3701 if (Size > 16 && !isHA) {
3702 isIndirect = true;
3703 Size = 8;
3704 Align = 8;
3705 }
3706
3707 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
3708 llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
3709
3710 CGBuilderTy &Builder = CGF.Builder;
3711 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3712 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3713
3714 if (isEmptyRecord(getContext(), Ty, true)) {
3715 // These are ignored for parameter passing purposes.
3716 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3717 return Builder.CreateBitCast(Addr, PTy);
3718 }
3719
3720 const uint64_t MinABIAlign = 8;
3721 if (Align > MinABIAlign) {
3722 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
3723 Addr = Builder.CreateGEP(Addr, Offset);
3724 llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3725 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
3726 llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
3727 Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
3728 }
3729
3730 uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
3731 llvm::Value *NextAddr = Builder.CreateGEP(
3732 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
3733 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3734
3735 if (isIndirect)
3736 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
3737 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3738 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3739
3740 return AddrTyped;
3741}
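// Illustrative note (not from the original file): for a hypothetical Darwin
// va_arg of 'struct { int a, b, c; }' (12 bytes, 4-byte aligned) the code
// above performs no extra realignment (4 <= MinABIAlign), reads the value
// directly from the current slot, and advances the pointer by
// RoundUpToAlignment(12, 8) == 16 bytes.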
3742
3743//===----------------------------------------------------------------------===//
Daniel Dunbard59655c2009-09-12 00:59:49 +00003744// ARM ABI Implementation
Chris Lattner0cf24192010-06-28 20:05:43 +00003745//===----------------------------------------------------------------------===//
Daniel Dunbard59655c2009-09-12 00:59:49 +00003746
3747namespace {
3748
Anton Korobeynikov244360d2009-06-05 22:08:42 +00003749class ARMABIInfo : public ABIInfo {
Daniel Dunbar020daa92009-09-12 01:00:39 +00003750public:
3751 enum ABIKind {
3752 APCS = 0,
3753 AAPCS = 1,
3754 AAPCS_VFP
3755 };
3756
3757private:
3758 ABIKind Kind;
Oliver Stannard405bded2014-02-11 09:25:50 +00003759 mutable int VFPRegs[16];
3760 const unsigned NumVFPs;
3761 const unsigned NumGPRs;
3762 mutable unsigned AllocatedGPRs;
3763 mutable unsigned AllocatedVFPs;
Daniel Dunbar020daa92009-09-12 01:00:39 +00003764
3765public:
Oliver Stannard405bded2014-02-11 09:25:50 +00003766 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
3767 NumVFPs(16), NumGPRs(4) {
John McCall882987f2013-02-28 19:01:20 +00003768 setRuntimeCC();
Oliver Stannard405bded2014-02-11 09:25:50 +00003769 resetAllocatedRegs();
John McCall882987f2013-02-28 19:01:20 +00003770 }
Daniel Dunbar020daa92009-09-12 01:00:39 +00003771
John McCall3480ef22011-08-30 01:42:09 +00003772 bool isEABI() const {
Joerg Sonnenberger782e6aa2013-12-12 21:29:27 +00003773 switch (getTarget().getTriple().getEnvironment()) {
3774 case llvm::Triple::Android:
3775 case llvm::Triple::EABI:
Joerg Sonnenbergerd75a1f82013-12-16 19:16:04 +00003776 case llvm::Triple::EABIHF:
Joerg Sonnenberger782e6aa2013-12-12 21:29:27 +00003777 case llvm::Triple::GNUEABI:
Joerg Sonnenberger0c1652d2013-12-16 18:30:28 +00003778 case llvm::Triple::GNUEABIHF:
Joerg Sonnenberger782e6aa2013-12-12 21:29:27 +00003779 return true;
3780 default:
3781 return false;
3782 }
John McCall3480ef22011-08-30 01:42:09 +00003783 }
3784
Joerg Sonnenbergerd75a1f82013-12-16 19:16:04 +00003785 bool isEABIHF() const {
3786 switch (getTarget().getTriple().getEnvironment()) {
3787 case llvm::Triple::EABIHF:
3788 case llvm::Triple::GNUEABIHF:
3789 return true;
3790 default:
3791 return false;
3792 }
3793 }
3794
Daniel Dunbar020daa92009-09-12 01:00:39 +00003795 ABIKind getABIKind() const { return Kind; }
3796
Tim Northovera484bc02013-10-01 14:34:25 +00003797private:
Amara Emerson9dc78782014-01-28 10:56:36 +00003798 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
Oliver Stannard405bded2014-02-11 09:25:50 +00003799 ABIArgInfo classifyArgumentType(QualType RetTy, bool &IsHA, bool isVariadic,
3800 bool &IsCPRC) const;
Manman Renfef9e312012-10-16 19:18:39 +00003801 bool isIllegalVectorType(QualType Ty) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00003802
Craig Topper4f12f102014-03-12 06:41:41 +00003803 void computeInfo(CGFunctionInfo &FI) const override;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00003804
Craig Topper4f12f102014-03-12 06:41:41 +00003805 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3806 CodeGenFunction &CGF) const override;
John McCall882987f2013-02-28 19:01:20 +00003807
3808 llvm::CallingConv::ID getLLVMDefaultCC() const;
3809 llvm::CallingConv::ID getABIDefaultCC() const;
3810 void setRuntimeCC();
Oliver Stannard405bded2014-02-11 09:25:50 +00003811
3812 void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
3813 void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
3814 void resetAllocatedRegs(void) const;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00003815};
3816
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00003817class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
3818public:
Chris Lattner2b037972010-07-29 02:01:43 +00003819 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
3820 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
John McCallbeec5a02010-03-06 00:35:14 +00003821
John McCall3480ef22011-08-30 01:42:09 +00003822 const ARMABIInfo &getABIInfo() const {
3823 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
3824 }
3825
Craig Topper4f12f102014-03-12 06:41:41 +00003826 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
John McCallbeec5a02010-03-06 00:35:14 +00003827 return 13;
3828 }
Roman Divackyc1617352011-05-18 19:36:54 +00003829
Craig Topper4f12f102014-03-12 06:41:41 +00003830 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
John McCall31168b02011-06-15 23:02:42 +00003831 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
3832 }
3833
Roman Divackyc1617352011-05-18 19:36:54 +00003834 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00003835 llvm::Value *Address) const override {
Chris Lattnerece04092012-02-07 00:39:47 +00003836 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
Roman Divackyc1617352011-05-18 19:36:54 +00003837
3838 // 0-15 are the 16 integer registers.
Chris Lattnerece04092012-02-07 00:39:47 +00003839 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
Roman Divackyc1617352011-05-18 19:36:54 +00003840 return false;
3841 }
John McCall3480ef22011-08-30 01:42:09 +00003842
Craig Topper4f12f102014-03-12 06:41:41 +00003843 unsigned getSizeOfUnwindException() const override {
John McCall3480ef22011-08-30 01:42:09 +00003844 if (getABIInfo().isEABI()) return 88;
3845 return TargetCodeGenInfo::getSizeOfUnwindException();
3846 }
Tim Northovera484bc02013-10-01 14:34:25 +00003847
3848 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
Craig Topper4f12f102014-03-12 06:41:41 +00003849 CodeGen::CodeGenModule &CGM) const override {
Tim Northovera484bc02013-10-01 14:34:25 +00003850 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
3851 if (!FD)
3852 return;
3853
3854 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
3855 if (!Attr)
3856 return;
3857
3858 const char *Kind;
3859 switch (Attr->getInterrupt()) {
3860 case ARMInterruptAttr::Generic: Kind = ""; break;
3861 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
3862 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
3863 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
3864 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
3865 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
3866 }
3867
3868 llvm::Function *Fn = cast<llvm::Function>(GV);
3869
3870 Fn->addFnAttr("interrupt", Kind);
3871
3872 if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
3873 return;
3874
3875 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
3876 // however this is not necessarily true on taking any interrupt. Instruct
3877 // the backend to perform a realignment as part of the function prologue.
3878 llvm::AttrBuilder B;
3879 B.addStackAlignmentAttr(8);
3880 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
3881 llvm::AttributeSet::get(CGM.getLLVMContext(),
3882 llvm::AttributeSet::FunctionIndex,
3883 B));
3884 }
3885
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00003886};
3887
Daniel Dunbard59655c2009-09-12 00:59:49 +00003888}
3889
Chris Lattner22326a12010-07-29 02:31:05 +00003890void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
Manman Ren2a523d82012-10-30 23:21:41 +00003891  // To correctly handle Homogeneous Aggregates, we need to keep track of the
Manman Renb505d332012-10-31 19:02:26 +00003892 // VFP registers allocated so far.
Manman Ren2a523d82012-10-30 23:21:41 +00003893 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
3894 // VFP registers of the appropriate type unallocated then the argument is
3895 // allocated to the lowest-numbered sequence of such registers.
3896 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
3897 // unallocated are marked as unavailable.
Oliver Stannard405bded2014-02-11 09:25:50 +00003898 resetAllocatedRegs();
3899
Amara Emerson9dc78782014-01-28 10:56:36 +00003900 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
Aaron Ballmanec47bc22014-03-17 18:10:01 +00003901 for (auto &I : FI.arguments()) {
Oliver Stannard405bded2014-02-11 09:25:50 +00003902 unsigned PreAllocationVFPs = AllocatedVFPs;
3903 unsigned PreAllocationGPRs = AllocatedGPRs;
Manman Ren2a523d82012-10-30 23:21:41 +00003904 bool IsHA = false;
Oliver Stannard405bded2014-02-11 09:25:50 +00003905 bool IsCPRC = false;
Manman Ren2a523d82012-10-30 23:21:41 +00003906 // 6.1.2.3 There is one VFP co-processor register class using registers
3907 // s0-s15 (d0-d7) for passing arguments.
Aaron Ballmanec47bc22014-03-17 18:10:01 +00003908 I.info = classifyArgumentType(I.type, IsHA, FI.isVariadic(), IsCPRC);
Oliver Stannard405bded2014-02-11 09:25:50 +00003909 assert((IsCPRC || !IsHA) && "Homogeneous aggregates must be CPRCs");
Manman Ren2a523d82012-10-30 23:21:41 +00003910 // If we do not have enough VFP registers for the HA, any VFP registers
3911 // that are unallocated are marked as unavailable. To achieve this, we add
Oliver Stannard405bded2014-02-11 09:25:50 +00003912 // padding of (NumVFPs - PreAllocationVFP) floats.
Amara Emerson9dc78782014-01-28 10:56:36 +00003913 // Note that IsHA will only be set when using the AAPCS-VFP calling convention,
3914 // and the callee is not variadic.
Oliver Stannard405bded2014-02-11 09:25:50 +00003915 if (IsHA && AllocatedVFPs > NumVFPs && PreAllocationVFPs < NumVFPs) {
Manman Ren2a523d82012-10-30 23:21:41 +00003916 llvm::Type *PaddingTy = llvm::ArrayType::get(
Oliver Stannard405bded2014-02-11 09:25:50 +00003917 llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocationVFPs);
Aaron Ballmanec47bc22014-03-17 18:10:01 +00003918 I.info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
Oliver Stannard405bded2014-02-11 09:25:50 +00003919 }
3920
3921 // If we have allocated some arguments onto the stack (due to running
3922 // out of VFP registers), we cannot split an argument between GPRs and
3923 // the stack. If this situation occurs, we add padding to prevent the
3924    // GPRs from being used. In that situation, the current argument could
3925 // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
3926 // unusable anyway.
3927 const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs;
3928 if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs && StackUsed) {
3929 llvm::Type *PaddingTy = llvm::ArrayType::get(
3930 llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
Oliver Stannard39d26c92014-05-07 10:39:12 +00003931 I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
3932 PaddingTy);
Manman Ren2a523d82012-10-30 23:21:41 +00003933 }
3934 }
Daniel Dunbar020daa92009-09-12 01:00:39 +00003935
Anton Korobeynikov231e8752011-04-14 20:06:49 +00003936 // Always honor user-specified calling convention.
3937 if (FI.getCallingConvention() != llvm::CallingConv::C)
3938 return;
3939
John McCall882987f2013-02-28 19:01:20 +00003940 llvm::CallingConv::ID cc = getRuntimeCC();
3941 if (cc != llvm::CallingConv::C)
3942 FI.setEffectiveCallingConvention(cc);
3943}
Rafael Espindolaa92c4422010-06-16 16:13:39 +00003944
John McCall882987f2013-02-28 19:01:20 +00003945/// Return the default calling convention that LLVM will use.
3946llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
3947 // The default calling convention that LLVM will infer.
Joerg Sonnenbergerd75a1f82013-12-16 19:16:04 +00003948 if (isEABIHF())
John McCall882987f2013-02-28 19:01:20 +00003949 return llvm::CallingConv::ARM_AAPCS_VFP;
3950 else if (isEABI())
3951 return llvm::CallingConv::ARM_AAPCS;
3952 else
3953 return llvm::CallingConv::ARM_APCS;
3954}
3955
3956/// Return the calling convention that our ABI would like us to use
3957/// as the C calling convention.
3958llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
Daniel Dunbar020daa92009-09-12 01:00:39 +00003959 switch (getABIKind()) {
John McCall882987f2013-02-28 19:01:20 +00003960 case APCS: return llvm::CallingConv::ARM_APCS;
3961 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
3962 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
Daniel Dunbar020daa92009-09-12 01:00:39 +00003963 }
John McCall882987f2013-02-28 19:01:20 +00003964 llvm_unreachable("bad ABI kind");
3965}
3966
3967void ARMABIInfo::setRuntimeCC() {
3968 assert(getRuntimeCC() == llvm::CallingConv::C);
3969
3970 // Don't muddy up the IR with a ton of explicit annotations if
3971 // they'd just match what LLVM will infer from the triple.
3972 llvm::CallingConv::ID abiCC = getABIDefaultCC();
3973 if (abiCC != getLLVMDefaultCC())
3974 RuntimeCC = abiCC;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00003975}
3976
Bob Wilsone826a2a2011-08-03 05:58:22 +00003977/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
3978/// aggregate. If HAMembers is non-null, the number of base elements
3979/// contained in the type is returned through it; this is used for the
3980/// recursive calls that check aggregate component types.
3981static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
Tim Northovera2ee4332014-03-29 15:09:45 +00003982 ASTContext &Context, uint64_t *HAMembers) {
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00003983 uint64_t Members = 0;
Bob Wilsone826a2a2011-08-03 05:58:22 +00003984 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3985 if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
3986 return false;
3987 Members *= AT->getSize().getZExtValue();
3988 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3989 const RecordDecl *RD = RT->getDecl();
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00003990 if (RD->hasFlexibleArrayMember())
Bob Wilsone826a2a2011-08-03 05:58:22 +00003991 return false;
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00003992
Bob Wilsone826a2a2011-08-03 05:58:22 +00003993 Members = 0;
Aaron Ballmane8a8bae2014-03-08 20:12:42 +00003994 for (const auto *FD : RD->fields()) {
Bob Wilsone826a2a2011-08-03 05:58:22 +00003995 uint64_t FldMembers;
3996 if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
3997 return false;
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00003998
3999 Members = (RD->isUnion() ?
4000 std::max(Members, FldMembers) : Members + FldMembers);
Bob Wilsone826a2a2011-08-03 05:58:22 +00004001 }
4002 } else {
4003 Members = 1;
4004 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4005 Members = 2;
4006 Ty = CT->getElementType();
4007 }
4008
4009 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
4010 // double, or 64-bit or 128-bit vectors.
4011 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4012 if (BT->getKind() != BuiltinType::Float &&
Tim Northovereb752d42012-07-20 22:29:29 +00004013 BT->getKind() != BuiltinType::Double &&
4014 BT->getKind() != BuiltinType::LongDouble)
Bob Wilsone826a2a2011-08-03 05:58:22 +00004015 return false;
4016 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4017 unsigned VecSize = Context.getTypeSize(VT);
4018 if (VecSize != 64 && VecSize != 128)
4019 return false;
4020 } else {
4021 return false;
4022 }
4023
4024 // The base type must be the same for all members. Vector types of the
4025 // same total size are treated as being equivalent here.
4026 const Type *TyPtr = Ty.getTypePtr();
4027 if (!Base)
4028 Base = TyPtr;
Oliver Stannard5e8558f2014-02-07 11:25:57 +00004029
4030 if (Base != TyPtr) {
4031 // Homogeneous aggregates are defined as containing members with the
4032 // same machine type. There are two cases in which two members have
4033 // different TypePtrs but the same machine type:
4034
4035 // 1) Vectors of the same length, regardless of the type and number
4036 // of their members.
4037 const bool SameLengthVectors = Base->isVectorType() && TyPtr->isVectorType()
4038 && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
4039
4040 // 2) In the 32-bit AAPCS, `double' and `long double' have the same
4041 // machine type. This is not the case for the 64-bit AAPCS.
4042 const bool SameSizeDoubles =
4043 ( ( Base->isSpecificBuiltinType(BuiltinType::Double)
4044 && TyPtr->isSpecificBuiltinType(BuiltinType::LongDouble))
4045 || ( Base->isSpecificBuiltinType(BuiltinType::LongDouble)
4046 && TyPtr->isSpecificBuiltinType(BuiltinType::Double)))
4047 && (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));
4048
4049 if (!SameLengthVectors && !SameSizeDoubles)
4050 return false;
4051 }
Bob Wilsone826a2a2011-08-03 05:58:22 +00004052 }
4053
4054 // Homogeneous Aggregates can have at most 4 members of the base type.
4055 if (HAMembers)
4056 *HAMembers = Members;
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00004057
4058 return (Members > 0 && Members <= 4);
Bob Wilsone826a2a2011-08-03 05:58:22 +00004059}
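// Illustrative note (not from the original file): hypothetical examples of
// what this predicate accepts and rejects:
//
//   struct V4F   { float x, y, z, w; };   // HA: 4 members, base type 'float'.
//   struct CD    { _Complex double z; };  // HA: 2 members, base type 'double'.
//   struct Mixed { float f; double d; };  // not an HA: differing base types.
//   struct Many  { float f[5]; };         // not an HA: more than 4 members.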
4060
Manman Renb505d332012-10-31 19:02:26 +00004061/// markAllocatedVFPs - update VFPRegs according to the alignment and
4062/// number of VFP registers (unit is S register) requested.
Oliver Stannard405bded2014-02-11 09:25:50 +00004063void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
4064 unsigned NumRequired) const {
Manman Renb505d332012-10-31 19:02:26 +00004065 // Early Exit.
Oliver Stannard405bded2014-02-11 09:25:50 +00004066 if (AllocatedVFPs >= 16) {
4067 // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on
4068 // the stack.
4069 AllocatedVFPs = 17;
Manman Renb505d332012-10-31 19:02:26 +00004070 return;
Oliver Stannard405bded2014-02-11 09:25:50 +00004071 }
Manman Renb505d332012-10-31 19:02:26 +00004072 // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
4073 // VFP registers of the appropriate type unallocated then the argument is
4074 // allocated to the lowest-numbered sequence of such registers.
4075 for (unsigned I = 0; I < 16; I += Alignment) {
4076 bool FoundSlot = true;
4077 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4078 if (J >= 16 || VFPRegs[J]) {
4079 FoundSlot = false;
4080 break;
4081 }
4082 if (FoundSlot) {
4083 for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
4084 VFPRegs[J] = 1;
Oliver Stannard405bded2014-02-11 09:25:50 +00004085 AllocatedVFPs += NumRequired;
Manman Renb505d332012-10-31 19:02:26 +00004086 return;
4087 }
4088 }
4089 // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
4090 // unallocated are marked as unavailable.
4091 for (unsigned I = 0; I < 16; I++)
4092 VFPRegs[I] = 1;
Oliver Stannard405bded2014-02-11 09:25:50 +00004093 AllocatedVFPs = 17; // We do not have enough VFP registers.
Manman Renb505d332012-10-31 19:02:26 +00004094}
4095
Oliver Stannard405bded2014-02-11 09:25:50 +00004096/// Update AllocatedGPRs to record the number of general purpose registers
4097/// which have been allocated. It is valid for AllocatedGPRs to go above 4,
4098/// which represents arguments being stored on the stack.
4099void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
4100 unsigned NumRequired) const {
4101  assert((Alignment == 1 || Alignment == 2) && "Alignment must be 1 or 2 registers (4 or 8 bytes)");
4102
4103 if (Alignment == 2 && AllocatedGPRs & 0x1)
4104 AllocatedGPRs += 1;
4105
4106 AllocatedGPRs += NumRequired;
4107}
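// Illustrative note (not from the original file): a sketch of the
// even-register rule above.  With AllocatedGPRs == 1, a hypothetical 8-byte
// aligned argument (Alignment == 2) first rounds AllocatedGPRs up to 2 and
// then claims the next two registers, so an 8-byte-aligned value always
// starts at an even-numbered register.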
4108
4109void ARMABIInfo::resetAllocatedRegs(void) const {
4110 AllocatedGPRs = 0;
4111 AllocatedVFPs = 0;
4112 for (unsigned i = 0; i < NumVFPs; ++i)
4113 VFPRegs[i] = 0;
4114}
4115
4116ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool &IsHA,
4117 bool isVariadic,
4118 bool &IsCPRC) const {
Manman Ren2a523d82012-10-30 23:21:41 +00004119 // We update number of allocated VFPs according to
4120 // 6.1.2.1 The following argument types are VFP CPRCs:
4121 // A single-precision floating-point type (including promoted
4122 // half-precision types); A double-precision floating-point type;
4123 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
4124 // with a Base Type of a single- or double-precision floating-point type,
4125 // 64-bit containerized vectors or 128-bit containerized vectors with one
4126 // to four Elements.
4127
Manman Renfef9e312012-10-16 19:18:39 +00004128 // Handle illegal vector types here.
4129 if (isIllegalVectorType(Ty)) {
4130 uint64_t Size = getContext().getTypeSize(Ty);
4131 if (Size <= 32) {
4132 llvm::Type *ResType =
4133 llvm::Type::getInt32Ty(getVMContext());
Oliver Stannard405bded2014-02-11 09:25:50 +00004134 markAllocatedGPRs(1, 1);
Manman Renfef9e312012-10-16 19:18:39 +00004135 return ABIArgInfo::getDirect(ResType);
4136 }
4137 if (Size == 64) {
4138 llvm::Type *ResType = llvm::VectorType::get(
4139 llvm::Type::getInt32Ty(getVMContext()), 2);
Oliver Stannard405bded2014-02-11 09:25:50 +00004140 if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){
4141 markAllocatedGPRs(2, 2);
4142 } else {
4143 markAllocatedVFPs(2, 2);
4144 IsCPRC = true;
4145 }
Manman Renfef9e312012-10-16 19:18:39 +00004146 return ABIArgInfo::getDirect(ResType);
4147 }
4148 if (Size == 128) {
4149 llvm::Type *ResType = llvm::VectorType::get(
4150 llvm::Type::getInt32Ty(getVMContext()), 4);
Oliver Stannard405bded2014-02-11 09:25:50 +00004151 if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
4152 markAllocatedGPRs(2, 4);
4153 } else {
4154 markAllocatedVFPs(4, 4);
4155 IsCPRC = true;
4156 }
Manman Renfef9e312012-10-16 19:18:39 +00004157 return ABIArgInfo::getDirect(ResType);
4158 }
Oliver Stannard405bded2014-02-11 09:25:50 +00004159 markAllocatedGPRs(1, 1);
Manman Renfef9e312012-10-16 19:18:39 +00004160 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
4161 }
Manman Renb505d332012-10-31 19:02:26 +00004162 // Update VFPRegs for legal vector types.
Oliver Stannard405bded2014-02-11 09:25:50 +00004163 if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
4164 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4165 uint64_t Size = getContext().getTypeSize(VT);
4166      // Size of a legal vector should be either 64 or 128 bits.
4167 markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
4168 IsCPRC = true;
4169 }
Manman Ren2a523d82012-10-30 23:21:41 +00004170 }
Manman Renb505d332012-10-31 19:02:26 +00004171 // Update VFPRegs for floating point types.
Oliver Stannard405bded2014-02-11 09:25:50 +00004172 if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
4173 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4174 if (BT->getKind() == BuiltinType::Half ||
4175 BT->getKind() == BuiltinType::Float) {
4176 markAllocatedVFPs(1, 1);
4177 IsCPRC = true;
4178 }
4179 if (BT->getKind() == BuiltinType::Double ||
4180 BT->getKind() == BuiltinType::LongDouble) {
4181 markAllocatedVFPs(2, 2);
4182 IsCPRC = true;
4183 }
4184 }
Manman Ren2a523d82012-10-30 23:21:41 +00004185 }
Manman Renfef9e312012-10-16 19:18:39 +00004186
John McCalla1dee5302010-08-22 10:59:02 +00004187 if (!isAggregateTypeForABI(Ty)) {
Douglas Gregora71cc152010-02-02 20:10:50 +00004188 // Treat an enum type as its underlying type.
Oliver Stannard405bded2014-02-11 09:25:50 +00004189 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
Douglas Gregora71cc152010-02-02 20:10:50 +00004190 Ty = EnumTy->getDecl()->getIntegerType();
Oliver Stannard405bded2014-02-11 09:25:50 +00004191 }
Douglas Gregora71cc152010-02-02 20:10:50 +00004192
Oliver Stannard405bded2014-02-11 09:25:50 +00004193 unsigned Size = getContext().getTypeSize(Ty);
4194 if (!IsCPRC)
4195 markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00004196 return (Ty->isPromotableIntegerType() ?
4197 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Douglas Gregora71cc152010-02-02 20:10:50 +00004198 }
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004199
Oliver Stannard405bded2014-02-11 09:25:50 +00004200 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4201 markAllocatedGPRs(1, 1);
Tim Northover1060eae2013-06-21 22:49:34 +00004202 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Oliver Stannard405bded2014-02-11 09:25:50 +00004203 }
Tim Northover1060eae2013-06-21 22:49:34 +00004204
Daniel Dunbar09d33622009-09-14 21:54:03 +00004205 // Ignore empty records.
Chris Lattner458b2aa2010-07-29 02:16:43 +00004206 if (isEmptyRecord(getContext(), Ty, true))
Daniel Dunbar09d33622009-09-14 21:54:03 +00004207 return ABIArgInfo::getIgnore();
4208
Amara Emerson9dc78782014-01-28 10:56:36 +00004209 if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
Manman Ren2a523d82012-10-30 23:21:41 +00004210 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
4211 // into VFP registers.
Bob Wilsone826a2a2011-08-03 05:58:22 +00004212 const Type *Base = 0;
Manman Ren2a523d82012-10-30 23:21:41 +00004213 uint64_t Members = 0;
4214 if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00004215 assert(Base && "Base class should be set for homogeneous aggregate");
Manman Ren2a523d82012-10-30 23:21:41 +00004216 // Base can be a floating-point or a vector.
4217 if (Base->isVectorType()) {
4218 // ElementSize is in number of floats.
4219 unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
Oliver Stannard405bded2014-02-11 09:25:50 +00004220 markAllocatedVFPs(ElementSize,
Manman Ren77b02382012-11-06 19:05:29 +00004221 Members * ElementSize);
Manman Ren2a523d82012-10-30 23:21:41 +00004222 } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
Oliver Stannard405bded2014-02-11 09:25:50 +00004223 markAllocatedVFPs(1, Members);
Manman Ren2a523d82012-10-30 23:21:41 +00004224 else {
4225 assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
4226 Base->isSpecificBuiltinType(BuiltinType::LongDouble));
Oliver Stannard405bded2014-02-11 09:25:50 +00004227 markAllocatedVFPs(2, Members * 2);
Manman Ren2a523d82012-10-30 23:21:41 +00004228 }
4229 IsHA = true;
Oliver Stannard405bded2014-02-11 09:25:50 +00004230 IsCPRC = true;
Bob Wilsone826a2a2011-08-03 05:58:22 +00004231 return ABIArgInfo::getExpand();
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00004232 }
Bob Wilsone826a2a2011-08-03 05:58:22 +00004233 }
4234
Manman Ren6c30e132012-08-13 21:23:55 +00004235 // Support byval for ARM.
Manman Ren77b02382012-11-06 19:05:29 +00004236 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
4237 // most 8-byte. We realign the indirect argument if type alignment is bigger
4238 // than ABI alignment.
Manman Ren505d68f2012-11-05 22:42:46 +00004239 uint64_t ABIAlign = 4;
4240 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
4241 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
4242 getABIKind() == ARMABIInfo::AAPCS)
4243 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
Manman Ren8cd99812012-11-06 04:58:01 +00004244 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
Oliver Stannard405bded2014-02-11 09:25:50 +00004245 // Update Allocated GPRs
4246 markAllocatedGPRs(1, 1);
Oliver Stannard7c3c09e2014-03-12 14:02:50 +00004247 return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
Manman Ren77b02382012-11-06 19:05:29 +00004248 /*Realign=*/TyAlign > ABIAlign);
Eli Friedmane66abda2012-08-09 00:31:40 +00004249 }
4250
Daniel Dunbarb34b0802010-09-23 01:54:28 +00004251 // Otherwise, pass by coercing to a structure of the appropriate size.
Chris Lattner2192fe52011-07-18 04:24:23 +00004252 llvm::Type* ElemTy;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004253 unsigned SizeRegs;
Eli Friedmane66abda2012-08-09 00:31:40 +00004254 // FIXME: Try to match the types of the arguments more accurately where
4255 // we can.
4256 if (getContext().getTypeAlign(Ty) <= 32) {
Bob Wilson8e2b75d2011-08-01 23:39:04 +00004257 ElemTy = llvm::Type::getInt32Ty(getVMContext());
4258 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
Oliver Stannard405bded2014-02-11 09:25:50 +00004259 markAllocatedGPRs(1, SizeRegs);
Manman Ren6fdb1582012-06-25 22:04:00 +00004260 } else {
Manman Ren6fdb1582012-06-25 22:04:00 +00004261 ElemTy = llvm::Type::getInt64Ty(getVMContext());
4262 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
Oliver Stannard405bded2014-02-11 09:25:50 +00004263 markAllocatedGPRs(2, SizeRegs * 2);
Stuart Hastingsf2752a32011-04-27 17:24:02 +00004264 }
Stuart Hastings4b214952011-04-28 18:16:06 +00004265
Chris Lattnera5f58b02011-07-09 17:41:47 +00004266 llvm::Type *STy =
Chris Lattner845511f2011-06-18 22:49:11 +00004267 llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
Stuart Hastings4b214952011-04-28 18:16:06 +00004268 return ABIArgInfo::getDirect(STy);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004269}
4270
Chris Lattner458b2aa2010-07-29 02:16:43 +00004271static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004272 llvm::LLVMContext &VMContext) {
4273 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
4274 // is called integer-like if its size is less than or equal to one word, and
4275 // the offset of each of its addressable sub-fields is zero.
4276
4277 uint64_t Size = Context.getTypeSize(Ty);
4278
4279 // Check that the type fits in a word.
4280 if (Size > 32)
4281 return false;
4282
4283 // FIXME: Handle vector types!
4284 if (Ty->isVectorType())
4285 return false;
4286
Daniel Dunbard53bac72009-09-14 02:20:34 +00004287 // Float types are never treated as "integer like".
4288 if (Ty->isRealFloatingType())
4289 return false;
4290
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004291 // If this is a builtin or pointer type then it is ok.
John McCall9dd450b2009-09-21 23:43:11 +00004292 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004293 return true;
4294
Daniel Dunbar96ebba52010-02-01 23:31:26 +00004295 // Small complex integer types are "integer like".
4296 if (const ComplexType *CT = Ty->getAs<ComplexType>())
4297 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004298
4299 // Single element and zero sized arrays should be allowed, by the definition
4300 // above, but they are not.
4301
4302 // Otherwise, it must be a record type.
4303 const RecordType *RT = Ty->getAs<RecordType>();
4304 if (!RT) return false;
4305
4306 // Ignore records with flexible arrays.
4307 const RecordDecl *RD = RT->getDecl();
4308 if (RD->hasFlexibleArrayMember())
4309 return false;
4310
4311 // Check that all sub-fields are at offset 0, and are themselves "integer
4312 // like".
4313 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
4314
4315 bool HadField = false;
4316 unsigned idx = 0;
4317 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
4318 i != e; ++i, ++idx) {
David Blaikie40ed2972012-06-06 20:45:41 +00004319 const FieldDecl *FD = *i;
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004320
Daniel Dunbar45c7ff12010-01-29 03:22:29 +00004321 // Bit-fields are not addressable, we only need to verify they are "integer
4322 // like". We still have to disallow a subsequent non-bitfield, for example:
4323    // struct { int : 0; int x; }
4324 // is non-integer like according to gcc.
4325 if (FD->isBitField()) {
4326 if (!RD->isUnion())
4327 HadField = true;
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004328
Daniel Dunbar45c7ff12010-01-29 03:22:29 +00004329 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
4330 return false;
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004331
Daniel Dunbar45c7ff12010-01-29 03:22:29 +00004332 continue;
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004333 }
4334
Daniel Dunbar45c7ff12010-01-29 03:22:29 +00004335 // Check if this field is at offset 0.
4336 if (Layout.getFieldOffset(idx) != 0)
4337 return false;
4338
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004339 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
4340 return false;
Michael J. Spencerb2f376b2010-08-25 18:17:27 +00004341
Daniel Dunbar45c7ff12010-01-29 03:22:29 +00004342 // Only allow at most one field in a structure. This doesn't match the
4343 // wording above, but follows gcc in situations with a field following an
4344 // empty structure.
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004345 if (!RD->isUnion()) {
4346 if (HadField)
4347 return false;
4348
4349 HadField = true;
4350 }
4351 }
4352
4353 return true;
4354}
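// For illustration, a few classifications under the rules above (APCS):
//   struct S1 { short s; };          // integer-like: 16 bits, single field at offset 0
//   union  U1 { char c; short s; };  // integer-like: every union member is at offset 0
//   struct S2 { char c; short s; };  // not integer-like: second field at a non-zero offset
//   struct S3 { int a, b; };         // not integer-like: 64 bits is wider than one word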
4355
Oliver Stannard405bded2014-02-11 09:25:50 +00004356ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
4357 bool isVariadic) const {
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004358 if (RetTy->isVoidType())
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004359 return ABIArgInfo::getIgnore();
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004360
Daniel Dunbar19964db2010-09-23 01:54:32 +00004361 // Large vector types should be returned via memory.
Oliver Stannard405bded2014-02-11 09:25:50 +00004362 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
4363 markAllocatedGPRs(1, 1);
Daniel Dunbar19964db2010-09-23 01:54:32 +00004364 return ABIArgInfo::getIndirect(0);
Oliver Stannard405bded2014-02-11 09:25:50 +00004365 }
Daniel Dunbar19964db2010-09-23 01:54:32 +00004366
John McCalla1dee5302010-08-22 10:59:02 +00004367 if (!isAggregateTypeForABI(RetTy)) {
Douglas Gregora71cc152010-02-02 20:10:50 +00004368 // Treat an enum type as its underlying type.
4369 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4370 RetTy = EnumTy->getDecl()->getIntegerType();
4371
Anton Korobeynikov18adbf52009-06-06 09:36:29 +00004372 return (RetTy->isPromotableIntegerType() ?
4373 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Douglas Gregora71cc152010-02-02 20:10:50 +00004374 }
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004375
Rafael Espindolabbd44ef2010-06-08 02:42:08 +00004376 // Structures with either a non-trivial destructor or a non-trivial
4377 // copy constructor are always indirect.
Oliver Stannard405bded2014-02-11 09:25:50 +00004378 if (isRecordReturnIndirect(RetTy, getCXXABI())) {
4379 markAllocatedGPRs(1, 1);
Rafael Espindolabbd44ef2010-06-08 02:42:08 +00004380 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
Oliver Stannard405bded2014-02-11 09:25:50 +00004381 }
Rafael Espindolabbd44ef2010-06-08 02:42:08 +00004382
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004383 // Are we following APCS?
4384 if (getABIKind() == APCS) {
Chris Lattner458b2aa2010-07-29 02:16:43 +00004385 if (isEmptyRecord(getContext(), RetTy, false))
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004386 return ABIArgInfo::getIgnore();
4387
Daniel Dunbareedf1512010-02-01 23:31:19 +00004388 // Complex types are all returned as packed integers.
4389 //
4390 // FIXME: Consider using 2 x vector types if the back end handles them
4391 // correctly.
4392 if (RetTy->isAnyComplexType())
Chris Lattnerfe34c1d2010-07-29 06:26:06 +00004393 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Chris Lattner458b2aa2010-07-29 02:16:43 +00004394 getContext().getTypeSize(RetTy)));
Daniel Dunbareedf1512010-02-01 23:31:19 +00004395
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004396 // Integer like structures are returned in r0.
Chris Lattner458b2aa2010-07-29 02:16:43 +00004397 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004398 // Return in the smallest viable integer type.
Chris Lattner458b2aa2010-07-29 02:16:43 +00004399 uint64_t Size = getContext().getTypeSize(RetTy);
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004400 if (Size <= 8)
Chris Lattnerfe34c1d2010-07-29 06:26:06 +00004401 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004402 if (Size <= 16)
Chris Lattnerfe34c1d2010-07-29 06:26:06 +00004403 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
4404 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004405 }
4406
4407 // Otherwise return in memory.
Oliver Stannard405bded2014-02-11 09:25:50 +00004408 markAllocatedGPRs(1, 1);
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004409 return ABIArgInfo::getIndirect(0);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004410 }
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004411
4412 // Otherwise this is an AAPCS variant.
4413
Chris Lattner458b2aa2010-07-29 02:16:43 +00004414 if (isEmptyRecord(getContext(), RetTy, true))
Daniel Dunbar1ce72512009-09-14 00:56:55 +00004415 return ABIArgInfo::getIgnore();
4416
Bob Wilson1d9269a2011-11-02 04:51:36 +00004417 // Check for homogeneous aggregates with AAPCS-VFP.
Amara Emerson9dc78782014-01-28 10:56:36 +00004418 if (getABIKind() == AAPCS_VFP && !isVariadic) {
Bob Wilson1d9269a2011-11-02 04:51:36 +00004419 const Type *Base = 0;
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00004420 if (isHomogeneousAggregate(RetTy, Base, getContext())) {
4421 assert(Base && "Base class should be set for homogeneous aggregate");
Bob Wilson1d9269a2011-11-02 04:51:36 +00004422 // Homogeneous Aggregates are returned directly.
4423 return ABIArgInfo::getDirect();
Anton Korobeynikov4215ca72012-04-13 11:22:00 +00004424 }
Bob Wilson1d9269a2011-11-02 04:51:36 +00004425 }
4426
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004427 // Aggregates <= 4 bytes are returned in r0; other aggregates
4428 // are returned indirectly.
Chris Lattner458b2aa2010-07-29 02:16:43 +00004429 uint64_t Size = getContext().getTypeSize(RetTy);
Daniel Dunbar1ce72512009-09-14 00:56:55 +00004430 if (Size <= 32) {
4431 // Return in the smallest viable integer type.
4432 if (Size <= 8)
Chris Lattnerfe34c1d2010-07-29 06:26:06 +00004433 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
Daniel Dunbar1ce72512009-09-14 00:56:55 +00004434 if (Size <= 16)
Chris Lattnerfe34c1d2010-07-29 06:26:06 +00004435 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
4436 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
Daniel Dunbar1ce72512009-09-14 00:56:55 +00004437 }
4438
Oliver Stannard405bded2014-02-11 09:25:50 +00004439 markAllocatedGPRs(1, 1);
Daniel Dunbar626f1d82009-09-13 08:03:58 +00004440 return ABIArgInfo::getIndirect(0);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004441}
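// Rough examples of the classification above for AAPCS, non-variadic:
//   struct A { char c[3]; };   // 24 bits -> returned directly, packed into an i32
//   struct B { float x, y; };  // AAPCS_VFP: homogeneous aggregate, returned directly
//   struct C { char c[5]; };   // 40 bits -> returned indirectly via sret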
4442
Manman Renfef9e312012-10-16 19:18:39 +00004443/// isIllegalVector - check whether Ty is an illegal vector type.
4444bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
4445 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4446 // Check whether VT is legal.
4447 unsigned NumElements = VT->getNumElements();
4448 uint64_t Size = getContext().getTypeSize(VT);
4449 // NumElements should be a power of 2.
4450 if ((NumElements & (NumElements - 1)) != 0)
4451 return true;
4452 // A legal vector must be wider than 32 bits; vectors of 32 bits or fewer are illegal.
4453 return Size <= 32;
4454 }
4455 return false;
4456}
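// For example, under the checks above:
//   4 x i32 (128 bits) -> legal: power-of-2 element count and wider than 32 bits
//   2 x i8  (16 bits)  -> illegal: only 16 bits wide
//   3 x i32 (96 bits)  -> illegal: 3 elements is not a power of 2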
4457
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004458llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
Chris Lattner5e016ae2010-06-27 07:15:29 +00004459 CodeGenFunction &CGF) const {
Chris Lattnerece04092012-02-07 00:39:47 +00004460 llvm::Type *BP = CGF.Int8PtrTy;
4461 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004462
4463 CGBuilderTy &Builder = CGF.Builder;
Chris Lattnerece04092012-02-07 00:39:47 +00004464 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004465 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
Manman Rencca54d02012-10-16 19:01:37 +00004466
Tim Northover1711cc92013-06-21 23:05:33 +00004467 if (isEmptyRecord(getContext(), Ty, true)) {
4468 // These are ignored for parameter passing purposes.
4469 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4470 return Builder.CreateBitCast(Addr, PTy);
4471 }
4472
Manman Rencca54d02012-10-16 19:01:37 +00004473 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
Rafael Espindola11d994b2011-08-02 22:33:37 +00004474 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
Manman Renfef9e312012-10-16 19:18:39 +00004475 bool IsIndirect = false;
Manman Rencca54d02012-10-16 19:01:37 +00004476
4477 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
4478 // APCS. For AAPCS, the ABI alignment is at least 4 bytes and at most 8 bytes.
Manman Ren67effb92012-10-16 19:51:48 +00004479 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
4480 getABIKind() == ARMABIInfo::AAPCS)
4481 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
4482 else
4483 TyAlign = 4;
Manman Renfef9e312012-10-16 19:18:39 +00004484 // Use indirect if size of the illegal vector is bigger than 16 bytes.
4485 if (isIllegalVectorType(Ty) && Size > 16) {
4486 IsIndirect = true;
4487 Size = 4;
4488 TyAlign = 4;
4489 }
Manman Rencca54d02012-10-16 19:01:37 +00004490
4491 // Handle address alignment for ABI alignment > 4 bytes.
Rafael Espindola11d994b2011-08-02 22:33:37 +00004492 if (TyAlign > 4) {
4493 assert((TyAlign & (TyAlign - 1)) == 0 &&
4494 "Alignment is not power of 2!");
4495 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
4496 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
4497 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
Manman Rencca54d02012-10-16 19:01:37 +00004498 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
Rafael Espindola11d994b2011-08-02 22:33:37 +00004499 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004500
4501 uint64_t Offset =
Manman Rencca54d02012-10-16 19:01:37 +00004502 llvm::RoundUpToAlignment(Size, 4);
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004503 llvm::Value *NextAddr =
Chris Lattner5e016ae2010-06-27 07:15:29 +00004504 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004505 "ap.next");
4506 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
4507
Manman Renfef9e312012-10-16 19:18:39 +00004508 if (IsIndirect)
4509 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
Manman Ren67effb92012-10-16 19:51:48 +00004510 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
Manman Rencca54d02012-10-16 19:01:37 +00004511 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
4512 // may not be correctly aligned for the vector type. We create an aligned
4513 // temporary space and copy the content over from ap.cur to the temporary
4514 // space. This is necessary if the natural alignment of the type is greater
4515 // than the ABI alignment.
4516 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
4517 CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
4518 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
4519 "var.align");
4520 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
4521 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
4522 Builder.CreateMemCpy(Dst, Src,
4523 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
4524 TyAlign, false);
4525 Addr = AlignedTemp; // The content is now in the aligned location.
4526 }
4527 llvm::Type *PTy =
4528 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4529 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
4530
Anton Korobeynikov244360d2009-06-05 22:08:42 +00004531 return AddrTyped;
4532}
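// Rough sketch of the sequence emitted above for an 8-byte-aligned type under
// AAPCS (TyAlign is clamped to the range 4..8):
//   ap.cur   = load i8** %ap
//   ap.align = ap.cur rounded up to 8 bytes (ptrtoint/add/and/inttoptr)
//   ap.next  = ap.align + RoundUpToAlignment(size, 4); store back into %ap
// Illegal vectors larger than 16 bytes are passed by pointer and re-loaded here,
// and over-aligned vector types are copied into an aligned temporary first.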
4533
Benjamin Kramer1cdb23d2012-10-20 13:02:06 +00004534namespace {
4535
Derek Schuffa2020962012-10-16 22:30:41 +00004536class NaClARMABIInfo : public ABIInfo {
4537 public:
4538 NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
4539 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
Craig Topper4f12f102014-03-12 06:41:41 +00004540 void computeInfo(CGFunctionInfo &FI) const override;
4541 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4542 CodeGenFunction &CGF) const override;
Derek Schuffa2020962012-10-16 22:30:41 +00004543 private:
4544 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
4545 ARMABIInfo NInfo; // Used for everything else.
4546};
4547
4548class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
4549 public:
4550 NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
4551 : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
4552};
4553
Benjamin Kramer1cdb23d2012-10-20 13:02:06 +00004554}
4555
Derek Schuffa2020962012-10-16 22:30:41 +00004556void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
4557 if (FI.getASTCallingConvention() == CC_PnaclCall)
4558 PInfo.computeInfo(FI);
4559 else
4560 static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
4561}
4562
4563llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4564 CodeGenFunction &CGF) const {
4565 // Always use the native convention; calling pnacl-style varargs functions
4566 // is unsupported.
4567 return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
4568}
4569
Chris Lattner0cf24192010-06-28 20:05:43 +00004570//===----------------------------------------------------------------------===//
Tim Northover9bb857a2013-01-31 12:13:10 +00004571// AArch64 ABI Implementation
4572//===----------------------------------------------------------------------===//
4573
4574namespace {
4575
4576class AArch64ABIInfo : public ABIInfo {
4577public:
4578 AArch64ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4579
4580private:
4581 // The AArch64 PCS is explicit about return types and argument types being
4582 // handled identically, so we don't need to draw a distinction between
4583 // Argument and Return classification.
4584 ABIArgInfo classifyGenericType(QualType Ty, int &FreeIntRegs,
4585 int &FreeVFPRegs) const;
4586
4587 ABIArgInfo tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded, bool IsInt,
4588 llvm::Type *DirectTy = 0) const;
4589
Craig Topper4f12f102014-03-12 06:41:41 +00004590 void computeInfo(CGFunctionInfo &FI) const override;
Tim Northover9bb857a2013-01-31 12:13:10 +00004591
Craig Topper4f12f102014-03-12 06:41:41 +00004592 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4593 CodeGenFunction &CGF) const override;
Tim Northover9bb857a2013-01-31 12:13:10 +00004594};
4595
4596class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4597public:
4598 AArch64TargetCodeGenInfo(CodeGenTypes &CGT)
4599 :TargetCodeGenInfo(new AArch64ABIInfo(CGT)) {}
4600
4601 const AArch64ABIInfo &getABIInfo() const {
4602 return static_cast<const AArch64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
4603 }
4604
Craig Topper4f12f102014-03-12 06:41:41 +00004605 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
Tim Northover9bb857a2013-01-31 12:13:10 +00004606 return 31;
4607 }
4608
4609 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00004610 llvm::Value *Address) const override {
Tim Northover9bb857a2013-01-31 12:13:10 +00004611 // 0-31 are x0-x30 and sp: 8 bytes each
4612 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
4613 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 31);
4614
4615 // 64-95 are v0-v31: 16 bytes each
4616 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
4617 AssignToArrayRange(CGF.Builder, Address, Sixteen8, 64, 95);
4618
4619 return false;
4620 }
4621
4622};
4623
4624}
4625
4626void AArch64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4627 int FreeIntRegs = 8, FreeVFPRegs = 8;
4628
4629 FI.getReturnInfo() = classifyGenericType(FI.getReturnType(),
4630 FreeIntRegs, FreeVFPRegs);
4631
4632 FreeIntRegs = FreeVFPRegs = 8;
Aaron Ballmanec47bc22014-03-17 18:10:01 +00004633 for (auto &I : FI.arguments()) {
4634 I.info = classifyGenericType(I.type, FreeIntRegs, FreeVFPRegs);
Tim Northover9bb857a2013-01-31 12:13:10 +00004635
4636 }
4637}
4638
4639ABIArgInfo
4640AArch64ABIInfo::tryUseRegs(QualType Ty, int &FreeRegs, int RegsNeeded,
4641 bool IsInt, llvm::Type *DirectTy) const {
4642 if (FreeRegs >= RegsNeeded) {
4643 FreeRegs -= RegsNeeded;
4644 return ABIArgInfo::getDirect(DirectTy);
4645 }
4646
4647 llvm::Type *Padding = 0;
4648
4649 // We need padding so that later arguments don't get filled in anyway. That
4650 // wouldn't happen if only ByVal arguments followed in the same category, but
4651 // a large structure will simply seem to be a pointer as far as LLVM is
4652 // concerned.
4653 if (FreeRegs > 0) {
4654 if (IsInt)
4655 Padding = llvm::Type::getInt64Ty(getVMContext());
4656 else
4657 Padding = llvm::Type::getFloatTy(getVMContext());
4658
4659 // Either [N x i64] or [N x float].
4660 Padding = llvm::ArrayType::get(Padding, FreeRegs);
4661 FreeRegs = 0;
4662 }
4663
4664 return ABIArgInfo::getIndirect(getContext().getTypeAlign(Ty) / 8,
4665 /*IsByVal=*/ true, /*Realign=*/ false,
4666 Padding);
4667}
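// For example, with FreeRegs == 1 and RegsNeeded == 2 the argument is passed
// indirectly (byval), but a [1 x i64] (or [1 x float]) padding type is attached
// so the one remaining register is consumed and later arguments are not
// back-filled into it.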
4668
4669
4670ABIArgInfo AArch64ABIInfo::classifyGenericType(QualType Ty,
4671 int &FreeIntRegs,
4672 int &FreeVFPRegs) const {
4673 // Can only occur for return types, but harmless otherwise.
4674 if (Ty->isVoidType())
4675 return ABIArgInfo::getIgnore();
4676
4677 // Large vector types should be returned via memory. There's no such concept
4678 // in the ABI, but they'd be over 16 bytes anyway so no matter how they're
4679 // classified they'd go into memory (see B.3).
4680 if (Ty->isVectorType() && getContext().getTypeSize(Ty) > 128) {
4681 if (FreeIntRegs > 0)
4682 --FreeIntRegs;
4683 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
4684 }
4685
4686 // All non-aggregate LLVM types have a concrete ABI representation so they can
4687 // be passed directly. After this block we're guaranteed to be in a
4688 // complicated case.
4689 if (!isAggregateTypeForABI(Ty)) {
4690 // Treat an enum type as its underlying type.
4691 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4692 Ty = EnumTy->getDecl()->getIntegerType();
4693
4694 if (Ty->isFloatingType() || Ty->isVectorType())
4695 return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ false);
4696
4697 assert(getContext().getTypeSize(Ty) <= 128 &&
4698 "unexpectedly large scalar type");
4699
4700 int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
4701
4702 // If the type may need padding registers to ensure "alignment", we must be
4703 // careful when this is accounted for. Increasing the effective size covers
4704 // all cases.
4705 if (getContext().getTypeAlign(Ty) == 128)
4706 RegsNeeded += FreeIntRegs % 2 != 0;
4707
4708 return tryUseRegs(Ty, FreeIntRegs, RegsNeeded, /*IsInt=*/ true);
4709 }
4710
Mark Lacey3825e832013-10-06 01:33:34 +00004711 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00004712 if (FreeIntRegs > 0 && RAA == CGCXXABI::RAA_Indirect)
Tim Northover9bb857a2013-01-31 12:13:10 +00004713 --FreeIntRegs;
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00004714 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Tim Northover9bb857a2013-01-31 12:13:10 +00004715 }
4716
4717 if (isEmptyRecord(getContext(), Ty, true)) {
4718 if (!getContext().getLangOpts().CPlusPlus) {
4719 // Empty structs outside C++ mode are a GNU extension, so no ABI can
4720 // possibly tell us what to do. It turns out (I believe) that GCC ignores
4721 // the object for parameter-passing purposes.
4722 return ABIArgInfo::getIgnore();
4723 }
4724
4725 // The combination of C++98 9p5 (sizeof(struct) != 0) and the pseudocode
4726 // description of va_arg in the PCS requires that an empty struct does
4727 // actually occupy space for parameter-passing. I'm hoping for a
4728 // clarification giving an explicit paragraph to point to in the future.
4729 return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ 1, /*IsInt=*/ true,
4730 llvm::Type::getInt8Ty(getVMContext()));
4731 }
4732
4733 // Homogeneous vector aggregates get passed in registers or on the stack.
4734 const Type *Base = 0;
4735 uint64_t NumMembers = 0;
4736 if (isHomogeneousAggregate(Ty, Base, getContext(), &NumMembers)) {
4737 assert(Base && "Base class should be set for homogeneous aggregate");
4738 // Homogeneous aggregates are passed and returned directly.
4739 return tryUseRegs(Ty, FreeVFPRegs, /*RegsNeeded=*/ NumMembers,
4740 /*IsInt=*/ false);
4741 }
4742
4743 uint64_t Size = getContext().getTypeSize(Ty);
4744 if (Size <= 128) {
4745 // Small structs can use the same direct type whether they're in registers
4746 // or on the stack.
4747 llvm::Type *BaseTy;
4748 unsigned NumBases;
4749 int SizeInRegs = (Size + 63) / 64;
4750
4751 if (getContext().getTypeAlign(Ty) == 128) {
4752 BaseTy = llvm::Type::getIntNTy(getVMContext(), 128);
4753 NumBases = 1;
4754
4755 // If the type may need padding registers to ensure "alignment", we must
4756 // be careful when this is accounted for. Increasing the effective size
4757 // covers all cases.
4758 SizeInRegs += FreeIntRegs % 2 != 0;
4759 } else {
4760 BaseTy = llvm::Type::getInt64Ty(getVMContext());
4761 NumBases = SizeInRegs;
4762 }
4763 llvm::Type *DirectTy = llvm::ArrayType::get(BaseTy, NumBases);
4764
4765 return tryUseRegs(Ty, FreeIntRegs, /*RegsNeeded=*/ SizeInRegs,
4766 /*IsInt=*/ true, DirectTy);
4767 }
4768
4769 // If the aggregate is > 16 bytes, it's passed and returned indirectly. In
4770 // LLVM terms the return uses an "sret" pointer, but that's handled elsewhere.
4771 --FreeIntRegs;
4772 return ABIArgInfo::getIndirect(0, /* byVal = */ false);
4773}
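// Rough examples of the classification above (starting from 8 free registers):
//   struct HFA { double a, b; };   // homogeneous aggregate -> direct, 2 VFP regs
//   struct S16 { long x, y; };     // 16 bytes -> direct as [2 x i64]
//   struct S24 { long x, y, z; };  // 24 bytes -> indirect; one integer reg for the pointer
// A 16-byte-aligned scalar such as __int128 may need an extra register slot when
// FreeIntRegs is odd, per the alignment note above.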
4774
4775llvm::Value *AArch64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4776 CodeGenFunction &CGF) const {
Tim Northover9bb857a2013-01-31 12:13:10 +00004777 int FreeIntRegs = 8, FreeVFPRegs = 8;
4778 Ty = CGF.getContext().getCanonicalType(Ty);
4779 ABIArgInfo AI = classifyGenericType(Ty, FreeIntRegs, FreeVFPRegs);
4780
Tim Northovera2ee4332014-03-29 15:09:45 +00004781 return EmitAArch64VAArg(VAListAddr, Ty, 8 - FreeIntRegs, 8 - FreeVFPRegs,
4782 AI.isIndirect(), CGF);
Tim Northover9bb857a2013-01-31 12:13:10 +00004783}
4784
4785//===----------------------------------------------------------------------===//
Justin Holewinski83e96682012-05-24 17:43:12 +00004786// NVPTX ABI Implementation
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004787//===----------------------------------------------------------------------===//
4788
4789namespace {
4790
Justin Holewinski83e96682012-05-24 17:43:12 +00004791class NVPTXABIInfo : public ABIInfo {
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004792public:
Justin Holewinski36837432013-03-30 14:38:24 +00004793 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004794
4795 ABIArgInfo classifyReturnType(QualType RetTy) const;
4796 ABIArgInfo classifyArgumentType(QualType Ty) const;
4797
Craig Topper4f12f102014-03-12 06:41:41 +00004798 void computeInfo(CGFunctionInfo &FI) const override;
4799 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4800 CodeGenFunction &CFG) const override;
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004801};
4802
Justin Holewinski83e96682012-05-24 17:43:12 +00004803class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004804public:
Justin Holewinski83e96682012-05-24 17:43:12 +00004805 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
4806 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
Craig Topper4f12f102014-03-12 06:41:41 +00004807
4808 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4809 CodeGen::CodeGenModule &M) const override;
Justin Holewinski36837432013-03-30 14:38:24 +00004810private:
Eli Benderskye06a2c42014-04-15 16:57:05 +00004811 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
4812 // resulting MDNode to the nvvm.annotations MDNode.
4813 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004814};
4815
Justin Holewinski83e96682012-05-24 17:43:12 +00004816ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004817 if (RetTy->isVoidType())
4818 return ABIArgInfo::getIgnore();
Justin Holewinskif9329ff2013-11-20 20:35:34 +00004819
4820 // Note: this differs from the default ABI; non-scalar types are returned directly.
4821 if (!RetTy->isScalarType())
4822 return ABIArgInfo::getDirect();
4823
4824 // Treat an enum type as its underlying type.
4825 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4826 RetTy = EnumTy->getDecl()->getIntegerType();
4827
4828 return (RetTy->isPromotableIntegerType() ?
4829 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004830}
4831
Justin Holewinski83e96682012-05-24 17:43:12 +00004832ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
Justin Holewinskif9329ff2013-11-20 20:35:34 +00004833 // Treat an enum type as its underlying type.
4834 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4835 Ty = EnumTy->getDecl()->getIntegerType();
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004836
Justin Holewinskif9329ff2013-11-20 20:35:34 +00004837 return (Ty->isPromotableIntegerType() ?
4838 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004839}
4840
Justin Holewinski83e96682012-05-24 17:43:12 +00004841void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004842 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
Aaron Ballmanec47bc22014-03-17 18:10:01 +00004843 for (auto &I : FI.arguments())
4844 I.info = classifyArgumentType(I.type);
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004845
4846 // Always honor user-specified calling convention.
4847 if (FI.getCallingConvention() != llvm::CallingConv::C)
4848 return;
4849
John McCall882987f2013-02-28 19:01:20 +00004850 FI.setEffectiveCallingConvention(getRuntimeCC());
4851}
4852
Justin Holewinski83e96682012-05-24 17:43:12 +00004853llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4854 CodeGenFunction &CFG) const {
4855 llvm_unreachable("NVPTX does not support varargs");
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004856}
4857
Justin Holewinski83e96682012-05-24 17:43:12 +00004858void NVPTXTargetCodeGenInfo::
4859SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4860 CodeGen::CodeGenModule &M) const{
Justin Holewinski38031972011-10-05 17:58:44 +00004861 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4862 if (!FD) return;
4863
4864 llvm::Function *F = cast<llvm::Function>(GV);
4865
4866 // Perform special handling in OpenCL mode
David Blaikiebbafb8a2012-03-11 07:00:24 +00004867 if (M.getLangOpts().OpenCL) {
Justin Holewinski36837432013-03-30 14:38:24 +00004868 // Use OpenCL function attributes to check for kernel functions
Justin Holewinski38031972011-10-05 17:58:44 +00004869 // By default, all functions are device functions
Justin Holewinski38031972011-10-05 17:58:44 +00004870 if (FD->hasAttr<OpenCLKernelAttr>()) {
Justin Holewinski36837432013-03-30 14:38:24 +00004871 // OpenCL __kernel functions get kernel metadata
Eli Benderskye06a2c42014-04-15 16:57:05 +00004872 // Create !{<func-ref>, metadata !"kernel", i32 1} node
4873 addNVVMMetadata(F, "kernel", 1);
Justin Holewinski38031972011-10-05 17:58:44 +00004874 // And kernel functions are not subject to inlining
Bill Wendling207f0532012-12-20 19:27:06 +00004875 F->addFnAttr(llvm::Attribute::NoInline);
Justin Holewinski38031972011-10-05 17:58:44 +00004876 }
Peter Collingbourne5bad4af2011-10-06 16:49:54 +00004877 }
Justin Holewinski38031972011-10-05 17:58:44 +00004878
Peter Collingbourne5bad4af2011-10-06 16:49:54 +00004879 // Perform special handling in CUDA mode.
David Blaikiebbafb8a2012-03-11 07:00:24 +00004880 if (M.getLangOpts().CUDA) {
Justin Holewinski36837432013-03-30 14:38:24 +00004881 // CUDA __global__ functions get a kernel metadata entry. Since
Peter Collingbourne5bad4af2011-10-06 16:49:54 +00004882 // __global__ functions cannot be called from the device, we do not
4883 // need to set the noinline attribute.
Eli Benderskye06a2c42014-04-15 16:57:05 +00004884 if (FD->hasAttr<CUDAGlobalAttr>()) {
4885 // Create !{<func-ref>, metadata !"kernel", i32 1} node
4886 addNVVMMetadata(F, "kernel", 1);
4887 }
4888 if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
4889 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
4890 addNVVMMetadata(F, "maxntidx",
4891 FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
4892 // min blocks is a default argument for CUDALaunchBoundsAttr, so getting a
4893 // zero value from getMinBlocks either means it was not specified in
4894 // __launch_bounds__ or the user specified a 0 value. In both cases, we
4895 // don't have to add a PTX directive.
4896 int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
4897 if (MinCTASM > 0) {
4898 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
4899 addNVVMMetadata(F, "minctasm", MinCTASM);
4900 }
4901 }
Justin Holewinski38031972011-10-05 17:58:44 +00004902 }
4903}
4904
Eli Benderskye06a2c42014-04-15 16:57:05 +00004905void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
4906 int Operand) {
Justin Holewinski36837432013-03-30 14:38:24 +00004907 llvm::Module *M = F->getParent();
4908 llvm::LLVMContext &Ctx = M->getContext();
4909
4910 // Get "nvvm.annotations" metadata node
4911 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
4912
Eli Benderskye1627b42014-04-15 17:19:26 +00004913 llvm::Value *MDVals[] = {
4914 F, llvm::MDString::get(Ctx, Name),
4915 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand)};
Justin Holewinski36837432013-03-30 14:38:24 +00004916 // Append metadata to nvvm.annotations
4917 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
4918}
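// For a kernel function @foo, the calls above produce module-level metadata
// roughly of the form:
//   !nvvm.annotations = !{!0}
//   !0 = metadata !{void ()* @foo, metadata !"kernel", i32 1}
// with analogous "maxntidx" / "minctasm" entries for __launch_bounds__.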
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00004919}
4920
4921//===----------------------------------------------------------------------===//
Ulrich Weigand47445072013-05-06 16:26:41 +00004922// SystemZ ABI Implementation
4923//===----------------------------------------------------------------------===//
4924
4925namespace {
4926
4927class SystemZABIInfo : public ABIInfo {
4928public:
4929 SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
4930
4931 bool isPromotableIntegerType(QualType Ty) const;
4932 bool isCompoundType(QualType Ty) const;
4933 bool isFPArgumentType(QualType Ty) const;
4934
4935 ABIArgInfo classifyReturnType(QualType RetTy) const;
4936 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
4937
Craig Topper4f12f102014-03-12 06:41:41 +00004938 void computeInfo(CGFunctionInfo &FI) const override {
Ulrich Weigand47445072013-05-06 16:26:41 +00004939 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
Aaron Ballmanec47bc22014-03-17 18:10:01 +00004940 for (auto &I : FI.arguments())
4941 I.info = classifyArgumentType(I.type);
Ulrich Weigand47445072013-05-06 16:26:41 +00004942 }
4943
Craig Topper4f12f102014-03-12 06:41:41 +00004944 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4945 CodeGenFunction &CGF) const override;
Ulrich Weigand47445072013-05-06 16:26:41 +00004946};
4947
4948class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
4949public:
4950 SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
4951 : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
4952};
4953
4954}
4955
4956bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
4957 // Treat an enum type as its underlying type.
4958 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4959 Ty = EnumTy->getDecl()->getIntegerType();
4960
4961 // Promotable integer types are required to be promoted by the ABI.
4962 if (Ty->isPromotableIntegerType())
4963 return true;
4964
4965 // 32-bit values must also be promoted.
4966 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4967 switch (BT->getKind()) {
4968 case BuiltinType::Int:
4969 case BuiltinType::UInt:
4970 return true;
4971 default:
4972 return false;
4973 }
4974 return false;
4975}
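// For example: char, short, int and unsigned int arguments all get the extend
// treatment here, while long and long long already fill a 64-bit register and
// are left as-is.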
4976
4977bool SystemZABIInfo::isCompoundType(QualType Ty) const {
4978 return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
4979}
4980
4981bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
4982 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4983 switch (BT->getKind()) {
4984 case BuiltinType::Float:
4985 case BuiltinType::Double:
4986 return true;
4987 default:
4988 return false;
4989 }
4990
4991 if (const RecordType *RT = Ty->getAsStructureType()) {
4992 const RecordDecl *RD = RT->getDecl();
4993 bool Found = false;
4994
4995 // If this is a C++ record, check the bases first.
4996 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
Aaron Ballman574705e2014-03-13 15:41:46 +00004997 for (const auto &I : CXXRD->bases()) {
4998 QualType Base = I.getType();
Ulrich Weigand47445072013-05-06 16:26:41 +00004999
5000 // Empty bases don't affect things either way.
5001 if (isEmptyRecord(getContext(), Base, true))
5002 continue;
5003
5004 if (Found)
5005 return false;
5006 Found = isFPArgumentType(Base);
5007 if (!Found)
5008 return false;
5009 }
5010
5011 // Check the fields.
Aaron Ballmane8a8bae2014-03-08 20:12:42 +00005012 for (const auto *FD : RD->fields()) {
Ulrich Weigand47445072013-05-06 16:26:41 +00005013 // Empty bitfields don't affect things either way.
5014 // Unlike isSingleElementStruct(), empty structure and array fields
5015 // do count. So do anonymous bitfields that aren't zero-sized.
5016 if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5017 continue;
5018
5019 // Unlike isSingleElementStruct(), arrays do not count.
5020 // Nested isFPArgumentType structures still do though.
5021 if (Found)
5022 return false;
5023 Found = isFPArgumentType(FD->getType());
5024 if (!Found)
5025 return false;
5026 }
5027
5028 // Unlike isSingleElementStruct(), trailing padding is allowed.
5029 // An 8-byte aligned struct s { float f; } is passed as a double.
5030 return Found;
5031 }
5032
5033 return false;
5034}
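// Examples of the rules above:
//   struct F1 { float f; };          // FP argument: a single float field
//   struct F2 { struct F1 inner; };  // FP argument: nested structs are looked through
//   struct F3 { float f, g; };       // not FP: more than one counted field
//   struct F4 { float f[1]; };       // not FP: array fields never qualify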
5035
5036llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5037 CodeGenFunction &CGF) const {
5038 // Assume that va_list type is correct; should be pointer to LLVM type:
5039 // struct {
5040 // i64 __gpr;
5041 // i64 __fpr;
5042 // i8 *__overflow_arg_area;
5043 // i8 *__reg_save_area;
5044 // };
5045
5046 // Every argument occupies 8 bytes and is passed by preference in either
5047 // GPRs or FPRs.
5048 Ty = CGF.getContext().getCanonicalType(Ty);
5049 ABIArgInfo AI = classifyArgumentType(Ty);
5050 bool InFPRs = isFPArgumentType(Ty);
5051
5052 llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
5053 bool IsIndirect = AI.isIndirect();
5054 unsigned UnpaddedBitSize;
5055 if (IsIndirect) {
5056 APTy = llvm::PointerType::getUnqual(APTy);
5057 UnpaddedBitSize = 64;
5058 } else
5059 UnpaddedBitSize = getContext().getTypeSize(Ty);
5060 unsigned PaddedBitSize = 64;
5061 assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
5062
5063 unsigned PaddedSize = PaddedBitSize / 8;
5064 unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
5065
5066 unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
5067 if (InFPRs) {
5068 MaxRegs = 4; // Maximum of 4 FPR arguments
5069 RegCountField = 1; // __fpr
5070 RegSaveIndex = 16; // save offset for f0
5071 RegPadding = 0; // floats are passed in the high bits of an FPR
5072 } else {
5073 MaxRegs = 5; // Maximum of 5 GPR arguments
5074 RegCountField = 0; // __gpr
5075 RegSaveIndex = 2; // save offset for r2
5076 RegPadding = Padding; // values are passed in the low bits of a GPR
5077 }
5078
5079 llvm::Value *RegCountPtr =
5080 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
5081 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
5082 llvm::Type *IndexTy = RegCount->getType();
5083 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
5084 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
Oliver Stannard405bded2014-02-11 09:25:50 +00005085 "fits_in_regs");
Ulrich Weigand47445072013-05-06 16:26:41 +00005086
5087 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5088 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
5089 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5090 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
5091
5092 // Emit code to load the value if it was passed in registers.
5093 CGF.EmitBlock(InRegBlock);
5094
5095 // Work out the address of an argument register.
5096 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
5097 llvm::Value *ScaledRegCount =
5098 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
5099 llvm::Value *RegBase =
5100 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
5101 llvm::Value *RegOffset =
5102 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
5103 llvm::Value *RegSaveAreaPtr =
5104 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
5105 llvm::Value *RegSaveArea =
5106 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
5107 llvm::Value *RawRegAddr =
5108 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
5109 llvm::Value *RegAddr =
5110 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
5111
5112 // Update the register count
5113 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
5114 llvm::Value *NewRegCount =
5115 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
5116 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
5117 CGF.EmitBranch(ContBlock);
5118
5119 // Emit code to load the value if it was passed in memory.
5120 CGF.EmitBlock(InMemBlock);
5121
5122 // Work out the address of a stack argument.
5123 llvm::Value *OverflowArgAreaPtr =
5124 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
5125 llvm::Value *OverflowArgArea =
5126 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
5127 llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
5128 llvm::Value *RawMemAddr =
5129 CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
5130 llvm::Value *MemAddr =
5131 CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
5132
5133 // Update overflow_arg_area_ptr pointer
5134 llvm::Value *NewOverflowArgArea =
5135 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
5136 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5137 CGF.EmitBranch(ContBlock);
5138
5139 // Return the appropriate result.
5140 CGF.EmitBlock(ContBlock);
5141 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
5142 ResAddr->addIncoming(RegAddr, InRegBlock);
5143 ResAddr->addIncoming(MemAddr, InMemBlock);
5144
5145 if (IsIndirect)
5146 return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
5147
5148 return ResAddr;
5149}
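// Sketch of the register-save-area arithmetic above: GPR slots start at byte
// offset 2 * 8 == 16 (r2) and FPR slots at 16 * 8 == 128 (f0); every slot is
// 8 bytes, and narrower GPR values sit in the low bits of their slot, which is
// why RegPadding is added to the offset for them.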
5150
John McCall1fe2a8c2013-06-18 02:46:29 +00005151bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
5152 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
5153 assert(Triple.getArch() == llvm::Triple::x86);
5154
5155 switch (Opts.getStructReturnConvention()) {
5156 case CodeGenOptions::SRCK_Default:
5157 break;
5158 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
5159 return false;
5160 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
5161 return true;
5162 }
5163
5164 if (Triple.isOSDarwin())
5165 return true;
5166
5167 switch (Triple.getOS()) {
John McCall1fe2a8c2013-06-18 02:46:29 +00005168 case llvm::Triple::AuroraUX:
5169 case llvm::Triple::DragonFly:
5170 case llvm::Triple::FreeBSD:
5171 case llvm::Triple::OpenBSD:
5172 case llvm::Triple::Bitrig:
John McCall1fe2a8c2013-06-18 02:46:29 +00005173 return true;
Saleem Abdulrasool377066a2014-03-27 22:50:18 +00005174 case llvm::Triple::Win32:
5175 switch (Triple.getEnvironment()) {
5176 case llvm::Triple::UnknownEnvironment:
5177 case llvm::Triple::Cygnus:
5178 case llvm::Triple::GNU:
5179 case llvm::Triple::MSVC:
5180 return true;
5181 default:
5182 return false;
5183 }
John McCall1fe2a8c2013-06-18 02:46:29 +00005184 default:
5185 return false;
5186 }
5187}
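// In effect, -freg-struct-return / -fpcc-struct-return always decide the answer;
// otherwise Darwin, the BSD-style systems and the Windows environments listed
// above return small structs in registers, and everything else (e.g. Linux)
// returns them on the stack.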
Ulrich Weigand47445072013-05-06 16:26:41 +00005188
5189ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
5190 if (RetTy->isVoidType())
5191 return ABIArgInfo::getIgnore();
5192 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5193 return ABIArgInfo::getIndirect(0);
5194 return (isPromotableIntegerType(RetTy) ?
5195 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5196}
5197
5198ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
5199 // Handle the generic C++ ABI.
Mark Lacey3825e832013-10-06 01:33:34 +00005200 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
Ulrich Weigand47445072013-05-06 16:26:41 +00005201 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5202
5203 // Integers and enums are extended to full register width.
5204 if (isPromotableIntegerType(Ty))
5205 return ABIArgInfo::getExtend();
5206
5207 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
5208 uint64_t Size = getContext().getTypeSize(Ty);
5209 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
Richard Sandifordcdd86882013-12-04 09:59:57 +00005210 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
Ulrich Weigand47445072013-05-06 16:26:41 +00005211
5212 // Handle small structures.
5213 if (const RecordType *RT = Ty->getAs<RecordType>()) {
5214 // Structures with flexible arrays have variable length, so they really
5215 // fail the size test above.
5216 const RecordDecl *RD = RT->getDecl();
5217 if (RD->hasFlexibleArrayMember())
Richard Sandifordcdd86882013-12-04 09:59:57 +00005218 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
Ulrich Weigand47445072013-05-06 16:26:41 +00005219
5220 // The structure is passed as an unextended integer, a float, or a double.
5221 llvm::Type *PassTy;
5222 if (isFPArgumentType(Ty)) {
5223 assert(Size == 32 || Size == 64);
5224 if (Size == 32)
5225 PassTy = llvm::Type::getFloatTy(getVMContext());
5226 else
5227 PassTy = llvm::Type::getDoubleTy(getVMContext());
5228 } else
5229 PassTy = llvm::IntegerType::get(getVMContext(), Size);
5230 return ABIArgInfo::getDirect(PassTy);
5231 }
5232
5233 // Non-structure compounds are passed indirectly.
5234 if (isCompoundType(Ty))
Richard Sandifordcdd86882013-12-04 09:59:57 +00005235 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
Ulrich Weigand47445072013-05-06 16:26:41 +00005236
5237 return ABIArgInfo::getDirect(0);
5238}
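// Examples of the argument rules above:
//   int                    -> extended to full register width
//   struct { float f; }    -> passed directly as float
//   struct { char c[4]; }  -> passed directly as i32
//   struct { char c[3]; }  -> 3 bytes, not a power-of-two size -> passed indirectly
//   _Complex double        -> 16 bytes -> passed indirectly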
5239
5240//===----------------------------------------------------------------------===//
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00005241// MSP430 ABI Implementation
Chris Lattner0cf24192010-06-28 20:05:43 +00005242//===----------------------------------------------------------------------===//
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00005243
5244namespace {
5245
5246class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
5247public:
Chris Lattner2b037972010-07-29 02:01:43 +00005248 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
5249 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00005250 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
Craig Topper4f12f102014-03-12 06:41:41 +00005251 CodeGen::CodeGenModule &M) const override;
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00005252};
5253
5254}
5255
5256void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5257 llvm::GlobalValue *GV,
5258 CodeGen::CodeGenModule &M) const {
5259 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
5260 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
5261 // Handle 'interrupt' attribute:
5262 llvm::Function *F = cast<llvm::Function>(GV);
5263
5264 // Step 1: Set ISR calling convention.
5265 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
5266
5267 // Step 2: Add attributes goodness.
Bill Wendling207f0532012-12-20 19:27:06 +00005268 F->addFnAttr(llvm::Attribute::NoInline);
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00005269
5270 // Step 3: Emit ISR vector alias.
Anton Korobeynikovc5a7f922012-11-26 18:59:10 +00005271 unsigned Num = attr->getNumber() / 2;
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00005272 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
Anton Korobeynikovc5a7f922012-11-26 18:59:10 +00005273 "__isr_" + Twine(Num),
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00005274 GV, &M.getModule());
5275 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00005276 }
5277}
5278
Chris Lattner0cf24192010-06-28 20:05:43 +00005279//===----------------------------------------------------------------------===//
John McCall943fae92010-05-27 06:19:26 +00005280// MIPS ABI Implementation. This works for both little-endian and
5281// big-endian variants.
Chris Lattner0cf24192010-06-28 20:05:43 +00005282//===----------------------------------------------------------------------===//
5283
John McCall943fae92010-05-27 06:19:26 +00005284namespace {
Akira Hatanakab579fe52011-06-02 00:09:17 +00005285class MipsABIInfo : public ABIInfo {
Akira Hatanaka14378522011-11-02 23:14:57 +00005286 bool IsO32;
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005287 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
5288 void CoerceToIntArgs(uint64_t TySize,
Craig Topper5603df42013-07-05 19:34:19 +00005289 SmallVectorImpl<llvm::Type *> &ArgList) const;
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005290 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005291 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
Akira Hatanaka1632af62012-01-09 19:31:25 +00005292 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
Akira Hatanakab579fe52011-06-02 00:09:17 +00005293public:
Akira Hatanakac4baedd2013-11-11 22:10:46 +00005294 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005295 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
Akira Hatanakac4baedd2013-11-11 22:10:46 +00005296 StackAlignInBytes(IsO32 ? 8 : 16) {}
Akira Hatanakab579fe52011-06-02 00:09:17 +00005297
5298 ABIArgInfo classifyReturnType(QualType RetTy) const;
Akira Hatanakaf64e1ad2012-01-07 00:25:33 +00005299 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
Craig Topper4f12f102014-03-12 06:41:41 +00005300 void computeInfo(CGFunctionInfo &FI) const override;
5301 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5302 CodeGenFunction &CGF) const override;
Akira Hatanakab579fe52011-06-02 00:09:17 +00005303};
5304
John McCall943fae92010-05-27 06:19:26 +00005305class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
Akira Hatanaka0486db02011-09-20 18:23:28 +00005306 unsigned SizeOfUnwindException;
John McCall943fae92010-05-27 06:19:26 +00005307public:
Akira Hatanakac4baedd2013-11-11 22:10:46 +00005308 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
5309 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
Akira Hatanaka14378522011-11-02 23:14:57 +00005310 SizeOfUnwindException(IsO32 ? 24 : 32) {}
John McCall943fae92010-05-27 06:19:26 +00005311
Craig Topper4f12f102014-03-12 06:41:41 +00005312 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
John McCall943fae92010-05-27 06:19:26 +00005313 return 29;
5314 }
5315
Reed Kotler373feca2013-01-16 17:10:28 +00005316 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
Craig Topper4f12f102014-03-12 06:41:41 +00005317 CodeGen::CodeGenModule &CGM) const override {
Reed Kotler3d5966f2013-03-13 20:40:30 +00005318 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5319 if (!FD) return;
Rafael Espindolaa0851a22013-03-19 14:32:23 +00005320 llvm::Function *Fn = cast<llvm::Function>(GV);
Reed Kotler3d5966f2013-03-13 20:40:30 +00005321 if (FD->hasAttr<Mips16Attr>()) {
5322 Fn->addFnAttr("mips16");
5323 }
5324 else if (FD->hasAttr<NoMips16Attr>()) {
5325 Fn->addFnAttr("nomips16");
5326 }
Reed Kotler373feca2013-01-16 17:10:28 +00005327 }
Reed Kotler3d5966f2013-03-13 20:40:30 +00005328
John McCall943fae92010-05-27 06:19:26 +00005329 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00005330 llvm::Value *Address) const override;
John McCall3480ef22011-08-30 01:42:09 +00005331
Craig Topper4f12f102014-03-12 06:41:41 +00005332 unsigned getSizeOfUnwindException() const override {
Akira Hatanaka0486db02011-09-20 18:23:28 +00005333 return SizeOfUnwindException;
John McCall3480ef22011-08-30 01:42:09 +00005334 }
John McCall943fae92010-05-27 06:19:26 +00005335};
5336}
5337
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005338void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
Craig Topper5603df42013-07-05 19:34:19 +00005339 SmallVectorImpl<llvm::Type *> &ArgList) const {
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005340 llvm::IntegerType *IntTy =
5341 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005342
5343 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
5344 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5345 ArgList.push_back(IntTy);
5346
5347 // If necessary, add one more integer type to ArgList.
5348 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
5349
5350 if (R)
5351 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005352}
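// For instance, a 9-byte (72-bit) aggregate is coerced to { i32, i32, i8 } on
// O32 (4-byte slots) and to { i64, i8 } on N32/64 (8-byte slots).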
5353
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005354// In N32/64, an aligned double precision floating point field is passed in
5355// a register.
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005356llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005357 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
5358
5359 if (IsO32) {
5360 CoerceToIntArgs(TySize, ArgList);
5361 return llvm::StructType::get(getVMContext(), ArgList);
5362 }
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005363
Akira Hatanaka02e13e52012-01-12 00:52:17 +00005364 if (Ty->isComplexType())
5365 return CGT.ConvertType(Ty);
Akira Hatanaka79f04612012-01-10 23:12:19 +00005366
Akira Hatanaka4984f5d2012-02-09 19:54:16 +00005367 const RecordType *RT = Ty->getAs<RecordType>();
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005368
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005369 // Unions/vectors are passed in integer registers.
5370 if (!RT || !RT->isStructureOrClassType()) {
5371 CoerceToIntArgs(TySize, ArgList);
5372 return llvm::StructType::get(getVMContext(), ArgList);
5373 }
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005374
5375 const RecordDecl *RD = RT->getDecl();
5376 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005377 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005378
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005379 uint64_t LastOffset = 0;
5380 unsigned idx = 0;
5381 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
5382
Akira Hatanaka4984f5d2012-02-09 19:54:16 +00005383 // Iterate over fields in the struct/class and check if there are any aligned
5384 // double fields.
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005385 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5386 i != e; ++i, ++idx) {
David Blaikie2d7c57e2012-04-30 02:36:29 +00005387 const QualType Ty = i->getType();
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005388 const BuiltinType *BT = Ty->getAs<BuiltinType>();
5389
5390 if (!BT || BT->getKind() != BuiltinType::Double)
5391 continue;
5392
5393 uint64_t Offset = Layout.getFieldOffset(idx);
5394 if (Offset % 64) // Ignore doubles that are not aligned.
5395 continue;
5396
5397 // Add ((Offset - LastOffset) / 64) args of type i64.
5398 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
5399 ArgList.push_back(I64);
5400
5401 // Add double type.
5402 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
5403 LastOffset = Offset + 64;
5404 }
5405
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005406 CoerceToIntArgs(TySize - LastOffset, IntArgList);
5407 ArgList.append(IntArgList.begin(), IntArgList.end());
Akira Hatanaka101f70d2011-11-02 23:54:49 +00005408
5409 return llvm::StructType::get(getVMContext(), ArgList);
5410}
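// Example for N32/64: struct { double d; int i; } (16 bytes) becomes
// { double, i64 }, keeping the aligned double eligible for an FPR, while on O32
// the same struct is simply coerced to { i32, i32, i32, i32 }.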
5411
Akira Hatanakaddd66342013-10-29 18:41:15 +00005412llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
5413 uint64_t Offset) const {
5414 if (OrigOffset + MinABIStackAlignInBytes > Offset)
5415 return 0;
Akira Hatanaka1632af62012-01-09 19:31:25 +00005416
Akira Hatanakaddd66342013-10-29 18:41:15 +00005417 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
Akira Hatanaka1632af62012-01-09 19:31:25 +00005418}
Akira Hatanaka21ee88c2012-01-10 22:44:52 +00005419
Akira Hatanakaf64e1ad2012-01-07 00:25:33 +00005420ABIArgInfo
5421MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
Akira Hatanaka1632af62012-01-09 19:31:25 +00005422 uint64_t OrigOffset = Offset;
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005423 uint64_t TySize = getContext().getTypeSize(Ty);
Akira Hatanaka1632af62012-01-09 19:31:25 +00005424 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005425
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005426 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
5427 (uint64_t)StackAlignInBytes);
Akira Hatanakaddd66342013-10-29 18:41:15 +00005428 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
5429 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
Akira Hatanaka1632af62012-01-09 19:31:25 +00005430
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005431 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
Akira Hatanakab579fe52011-06-02 00:09:17 +00005432 // Ignore empty aggregates.
Akira Hatanakaf64e1ad2012-01-07 00:25:33 +00005433 if (TySize == 0)
Akira Hatanakab579fe52011-06-02 00:09:17 +00005434 return ABIArgInfo::getIgnore();
5435
Mark Lacey3825e832013-10-06 01:33:34 +00005436 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005437 Offset = OrigOffset + MinABIStackAlignInBytes;
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00005438 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Akira Hatanakaf64e1ad2012-01-07 00:25:33 +00005439 }
Akira Hatanakadf425db2011-08-01 18:09:58 +00005440
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005441 // If we have reached here, aggregates are passed directly by coercing to
5442 // another structure type. Padding is inserted if the offset of the
5443 // aggregate is unaligned.
5444 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
Akira Hatanakaddd66342013-10-29 18:41:15 +00005445 getPaddingType(OrigOffset, CurrOffset));
Akira Hatanakab579fe52011-06-02 00:09:17 +00005446 }
5447
5448 // Treat an enum type as its underlying type.
5449 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5450 Ty = EnumTy->getDecl()->getIntegerType();
5451
Akira Hatanaka1632af62012-01-09 19:31:25 +00005452 if (Ty->isPromotableIntegerType())
5453 return ABIArgInfo::getExtend();
5454
Akira Hatanakaddd66342013-10-29 18:41:15 +00005455 return ABIArgInfo::getDirect(
5456 0, 0, IsO32 ? 0 : getPaddingType(OrigOffset, CurrOffset));
Akira Hatanakab579fe52011-06-02 00:09:17 +00005457}
5458
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005459llvm::Type*
5460MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
Akira Hatanakab6f74432012-02-09 18:49:26 +00005461 const RecordType *RT = RetTy->getAs<RecordType>();
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005462 SmallVector<llvm::Type*, 8> RTList;
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005463
Akira Hatanakab6f74432012-02-09 18:49:26 +00005464 if (RT && RT->isStructureOrClassType()) {
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005465 const RecordDecl *RD = RT->getDecl();
Akira Hatanakab6f74432012-02-09 18:49:26 +00005466 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5467 unsigned FieldCnt = Layout.getFieldCount();
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005468
Akira Hatanakab6f74432012-02-09 18:49:26 +00005469 // N32/64 returns struct/classes in floating point registers if the
5470 // following conditions are met:
5471 // 1. The size of the struct/class is no larger than 128-bit.
5472 // 2. The struct/class has one or two fields all of which are floating
5473 // point types.
5474 // 3. The offset of the first field is zero (this follows what gcc does).
5475 //
5476 // Any other composite results are returned in integer registers.
5477 //
5478 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
5479 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
5480 for (; b != e; ++b) {
David Blaikie2d7c57e2012-04-30 02:36:29 +00005481 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005482
Akira Hatanakab6f74432012-02-09 18:49:26 +00005483 if (!BT || !BT->isFloatingPoint())
5484 break;
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005485
David Blaikie2d7c57e2012-04-30 02:36:29 +00005486 RTList.push_back(CGT.ConvertType(b->getType()));
Akira Hatanakab6f74432012-02-09 18:49:26 +00005487 }
5488
5489 if (b == e)
5490 return llvm::StructType::get(getVMContext(), RTList,
5491 RD->hasAttr<PackedAttr>());
5492
5493 RTList.clear();
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005494 }
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005495 }
5496
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005497 CoerceToIntArgs(Size, RTList);
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005498 return llvm::StructType::get(getVMContext(), RTList);
5499}
5500
Akira Hatanakab579fe52011-06-02 00:09:17 +00005501ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
Akira Hatanaka60f5fe62012-01-23 23:18:57 +00005502 uint64_t Size = getContext().getTypeSize(RetTy);
5503
5504 if (RetTy->isVoidType() || Size == 0)
Akira Hatanakab579fe52011-06-02 00:09:17 +00005505 return ABIArgInfo::getIgnore();
5506
Akira Hatanakac37eddf2012-05-11 21:01:17 +00005507 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
Mark Lacey3825e832013-10-06 01:33:34 +00005508 if (isRecordReturnIndirect(RetTy, getCXXABI()))
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00005509 return ABIArgInfo::getIndirect(0);
5510
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005511 if (Size <= 128) {
5512 if (RetTy->isAnyComplexType())
5513 return ABIArgInfo::getDirect();
5514
Akira Hatanakae1e3ad32012-07-03 19:24:06 +00005515 // O32 returns integer vectors in registers.
5516 if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
5517 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
5518
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00005519 if (!IsO32)
Akira Hatanakaf093f5b2012-01-04 03:34:42 +00005520 return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
5521 }
Akira Hatanakab579fe52011-06-02 00:09:17 +00005522
5523 return ABIArgInfo::getIndirect(0);
5524 }
5525
5526 // Treat an enum type as its underlying type.
5527 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5528 RetTy = EnumTy->getDecl()->getIntegerType();
5529
5530 return (RetTy->isPromotableIntegerType() ?
5531 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5532}
5533
5534void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
Akira Hatanaka32604a92012-01-12 01:10:09 +00005535 ABIArgInfo &RetInfo = FI.getReturnInfo();
5536 RetInfo = classifyReturnType(FI.getReturnType());
5537
5538 // Check if a pointer to an aggregate is passed as a hidden argument.
Akira Hatanaka8ab86cb2012-05-11 21:56:58 +00005539 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
Akira Hatanaka32604a92012-01-12 01:10:09 +00005540
Aaron Ballmanec47bc22014-03-17 18:10:01 +00005541 for (auto &I : FI.arguments())
5542 I.info = classifyArgumentType(I.type, Offset);
Akira Hatanakab579fe52011-06-02 00:09:17 +00005543}
5544
5545llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5546 CodeGenFunction &CGF) const {
Chris Lattnerece04092012-02-07 00:39:47 +00005547 llvm::Type *BP = CGF.Int8PtrTy;
5548 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Akira Hatanakafb1d9f32011-08-01 20:48:01 +00005549
5550 CGBuilderTy &Builder = CGF.Builder;
5551 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
5552 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
Akira Hatanaka37715282012-01-23 23:59:52 +00005553 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
Akira Hatanakafb1d9f32011-08-01 20:48:01 +00005554 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5555 llvm::Value *AddrTyped;
John McCallc8e01702013-04-16 22:48:15 +00005556 unsigned PtrWidth = getTarget().getPointerWidth(0);
Akira Hatanaka37715282012-01-23 23:59:52 +00005557 llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
Akira Hatanakafb1d9f32011-08-01 20:48:01 +00005558
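  // For over-aligned types, round the current pointer up to the next multiple
  // of TypeAlign using the usual (Addr + TypeAlign - 1) & -TypeAlign idiom.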
5559 if (TypeAlign > MinABIStackAlignInBytes) {
Akira Hatanaka37715282012-01-23 23:59:52 +00005560 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
5561 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
5562 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
5563 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
Akira Hatanakafb1d9f32011-08-01 20:48:01 +00005564 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
5565 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
5566 }
5567 else
5568 AddrTyped = Builder.CreateBitCast(Addr, PTy);
5569
5570 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
Akira Hatanaka37715282012-01-23 23:59:52 +00005571 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
Akira Hatanakafb1d9f32011-08-01 20:48:01 +00005572 uint64_t Offset =
5573 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
5574 llvm::Value *NextAddr =
Akira Hatanaka37715282012-01-23 23:59:52 +00005575 Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
Akira Hatanakafb1d9f32011-08-01 20:48:01 +00005576 "ap.next");
5577 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5578
5579 return AddrTyped;
Akira Hatanakab579fe52011-06-02 00:09:17 +00005580}
5581
John McCall943fae92010-05-27 06:19:26 +00005582bool
5583MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5584 llvm::Value *Address) const {
5585 // This information comes from gcc's implementation, which seems to
5586 // be as canonical as it gets.
5587
John McCall943fae92010-05-27 06:19:26 +00005588 // Everything on MIPS is 4 bytes. Double-precision FP registers
5589 // are aliased to pairs of single-precision FP registers.
Chris Lattnerece04092012-02-07 00:39:47 +00005590 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
John McCall943fae92010-05-27 06:19:26 +00005591
5592 // 0-31 are the general purpose registers, $0 - $31.
5593 // 32-63 are the floating-point registers, $f0 - $f31.
5594 // 64 and 65 are the multiply/divide registers, $hi and $lo.
5595 // 66 is the (notional, I think) register for signal-handler return.
Chris Lattnerece04092012-02-07 00:39:47 +00005596 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
John McCall943fae92010-05-27 06:19:26 +00005597
5598 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5599 // They are one bit wide and ignored here.
5600
5601 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5602 // (coprocessor 1 is the FP unit)
5603 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5604 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5605 // 176-181 are the DSP accumulator registers.
Chris Lattnerece04092012-02-07 00:39:47 +00005606 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
John McCall943fae92010-05-27 06:19:26 +00005607 return false;
5608}
5609
Peter Collingbourneadcf7c92011-10-13 16:24:41 +00005610//===----------------------------------------------------------------------===//
5611// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5612// Currently subclassed only to implement custom OpenCL C function attribute
5613// handling.
5614//===----------------------------------------------------------------------===//
5615
5616namespace {
5617
5618class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5619public:
5620 TCETargetCodeGenInfo(CodeGenTypes &CGT)
5621 : DefaultTargetCodeGenInfo(CGT) {}
5622
Craig Topper4f12f102014-03-12 06:41:41 +00005623 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5624 CodeGen::CodeGenModule &M) const override;
Peter Collingbourneadcf7c92011-10-13 16:24:41 +00005625};
5626
5627void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5628 llvm::GlobalValue *GV,
5629 CodeGen::CodeGenModule &M) const {
5630 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5631 if (!FD) return;
5632
5633 llvm::Function *F = cast<llvm::Function>(GV);
5634
David Blaikiebbafb8a2012-03-11 07:00:24 +00005635 if (M.getLangOpts().OpenCL) {
Peter Collingbourneadcf7c92011-10-13 16:24:41 +00005636 if (FD->hasAttr<OpenCLKernelAttr>()) {
5637 // OpenCL C Kernel functions are not subject to inlining
Bill Wendling207f0532012-12-20 19:27:06 +00005638 F->addFnAttr(llvm::Attribute::NoInline);
Aaron Ballman36a18ff2013-12-19 13:16:35 +00005639 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
5640 if (Attr) {
Peter Collingbourneadcf7c92011-10-13 16:24:41 +00005641 // Convert the reqd_work_group_size() attributes to metadata.
5642 llvm::LLVMContext &Context = F->getContext();
5643 llvm::NamedMDNode *OpenCLMetadata =
5644 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
5645
5646 SmallVector<llvm::Value*, 5> Operands;
5647 Operands.push_back(F);
5648
Chris Lattnerece04092012-02-07 00:39:47 +00005649 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
Aaron Ballman36a18ff2013-12-19 13:16:35 +00005650 llvm::APInt(32, Attr->getXDim())));
Chris Lattnerece04092012-02-07 00:39:47 +00005651 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
Aaron Ballman36a18ff2013-12-19 13:16:35 +00005652 llvm::APInt(32, Attr->getYDim())));
Chris Lattnerece04092012-02-07 00:39:47 +00005653 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
Aaron Ballman36a18ff2013-12-19 13:16:35 +00005654 llvm::APInt(32, Attr->getZDim())));
Peter Collingbourneadcf7c92011-10-13 16:24:41 +00005655
5656 // Add a boolean constant operand for "required" (true) or "hint" (false)
5657 // for implementing the work_group_size_hint attr later. Currently
5658 // always true as the hint is not yet implemented.
Chris Lattnerece04092012-02-07 00:39:47 +00005659 Operands.push_back(llvm::ConstantInt::getTrue(Context));
Peter Collingbourneadcf7c92011-10-13 16:24:41 +00005660 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
5661 }
5662 }
5663 }
5664}
5665
5666}
John McCall943fae92010-05-27 06:19:26 +00005667
Tony Linthicum76329bf2011-12-12 21:14:55 +00005668//===----------------------------------------------------------------------===//
5669// Hexagon ABI Implementation
5670//===----------------------------------------------------------------------===//
5671
5672namespace {
5673
5674class HexagonABIInfo : public ABIInfo {
5675
5676
5677public:
5678 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5679
5680private:
5681
5682 ABIArgInfo classifyReturnType(QualType RetTy) const;
5683 ABIArgInfo classifyArgumentType(QualType RetTy) const;
5684
Craig Topper4f12f102014-03-12 06:41:41 +00005685 void computeInfo(CGFunctionInfo &FI) const override;
Tony Linthicum76329bf2011-12-12 21:14:55 +00005686
Craig Topper4f12f102014-03-12 06:41:41 +00005687 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5688 CodeGenFunction &CGF) const override;
Tony Linthicum76329bf2011-12-12 21:14:55 +00005689};
5690
5691class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
5692public:
5693 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
5694 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
5695
Craig Topper4f12f102014-03-12 06:41:41 +00005696 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
Tony Linthicum76329bf2011-12-12 21:14:55 +00005697 return 29;
5698 }
5699};
5700
5701}
5702
5703void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
5704 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
Aaron Ballmanec47bc22014-03-17 18:10:01 +00005705 for (auto &I : FI.arguments())
5706 I.info = classifyArgumentType(I.type);
Tony Linthicum76329bf2011-12-12 21:14:55 +00005707}
5708
5709ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
5710 if (!isAggregateTypeForABI(Ty)) {
5711 // Treat an enum type as its underlying type.
5712 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5713 Ty = EnumTy->getDecl()->getIntegerType();
5714
5715 return (Ty->isPromotableIntegerType() ?
5716 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5717 }
5718
5719 // Ignore empty records.
5720 if (isEmptyRecord(getContext(), Ty, true))
5721 return ABIArgInfo::getIgnore();
5722
Mark Lacey3825e832013-10-06 01:33:34 +00005723 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
Timur Iskhodzhanov8fe501d2013-04-17 12:54:10 +00005724 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
Tony Linthicum76329bf2011-12-12 21:14:55 +00005725
5726 uint64_t Size = getContext().getTypeSize(Ty);
5727 if (Size > 64)
5728 return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5729 // Pass in the smallest viable integer type.
5730 else if (Size > 32)
5731 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5732 else if (Size > 16)
5733 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5734 else if (Size > 8)
5735 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5736 else
5737 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5738}
5739
5740ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
5741 if (RetTy->isVoidType())
5742 return ABIArgInfo::getIgnore();
5743
5744 // Large vector types should be returned via memory.
5745 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
5746 return ABIArgInfo::getIndirect(0);
5747
5748 if (!isAggregateTypeForABI(RetTy)) {
5749 // Treat an enum type as its underlying type.
5750 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5751 RetTy = EnumTy->getDecl()->getIntegerType();
5752
5753 return (RetTy->isPromotableIntegerType() ?
5754 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5755 }
5756
5757 // Structures with either a non-trivial destructor or a non-trivial
5758 // copy constructor are always indirect.
Mark Lacey3825e832013-10-06 01:33:34 +00005759 if (isRecordReturnIndirect(RetTy, getCXXABI()))
Tony Linthicum76329bf2011-12-12 21:14:55 +00005760 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5761
5762 if (isEmptyRecord(getContext(), RetTy, true))
5763 return ABIArgInfo::getIgnore();
5764
5765 // Aggregates <= 8 bytes are returned in r0; other aggregates
5766 // are returned indirectly.
5767 uint64_t Size = getContext().getTypeSize(RetTy);
5768 if (Size <= 64) {
5769 // Return in the smallest viable integer type.
5770 if (Size <= 8)
5771 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5772 if (Size <= 16)
5773 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5774 if (Size <= 32)
5775 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5776 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
5777 }
5778
5779 return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
5780}
5781
5782llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
Chris Lattnerece04092012-02-07 00:39:47 +00005783 CodeGenFunction &CGF) const {
Tony Linthicum76329bf2011-12-12 21:14:55 +00005784 // FIXME: Need to handle alignment
Chris Lattnerece04092012-02-07 00:39:47 +00005785 llvm::Type *BPP = CGF.Int8PtrPtrTy;
Tony Linthicum76329bf2011-12-12 21:14:55 +00005786
5787 CGBuilderTy &Builder = CGF.Builder;
5788 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
5789 "ap");
5790 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5791 llvm::Type *PTy =
5792 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5793 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
5794
5795 uint64_t Offset =
5796 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
5797 llvm::Value *NextAddr =
5798 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
5799 "ap.next");
5800 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5801
5802 return AddrTyped;
5803}
5804
5805
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00005806//===----------------------------------------------------------------------===//
5807// SPARC v9 ABI Implementation.
5808// Based on the SPARC Compliance Definition version 2.4.1.
5809//
5810// Function arguments a mapped to a nominal "parameter array" and promoted to
5811// registers depending on their type. Each argument occupies 8 or 16 bytes in
5812// the array, structs larger than 16 bytes are passed indirectly.
5813//
5814// One case requires special care:
5815//
5816// struct mixed {
5817// int i;
5818// float f;
5819// };
5820//
5821// When a struct mixed is passed by value, it only occupies 8 bytes in the
5822// parameter array, but the int is passed in an integer register, and the float
5823// is passed in a floating point register. This is represented as two arguments
5824// with the LLVM IR inreg attribute:
5825//
5826// declare void f(i32 inreg %i, float inreg %f)
5827//
5828// The code generator will only allocate 4 bytes from the parameter array for
5829// the inreg arguments. All other arguments are allocated a multiple of 8
5830// bytes.
5831//
5832namespace {
5833class SparcV9ABIInfo : public ABIInfo {
5834public:
5835 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5836
5837private:
5838 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
Craig Topper4f12f102014-03-12 06:41:41 +00005839 void computeInfo(CGFunctionInfo &FI) const override;
5840 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5841 CodeGenFunction &CGF) const override;
Jakob Stoklund Olesen02dc6a12013-05-28 04:57:37 +00005842
5843 // Coercion type builder for structs passed in registers. The coercion type
5844 // serves two purposes:
5845 //
5846 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
5847 // in registers.
5848 // 2. Expose aligned floating point elements as first-level elements, so the
5849 // code generator knows to pass them in floating point registers.
5850 //
5851 // We also compute the InReg flag which indicates that the struct contains
5852 // aligned 32-bit floats.
5853 //
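  // As a rough sketch of the result (the exact layout depends on the data
  // layout in use): a type such as
  //
  //   struct { double d; int i; };   // 16 bytes after tail padding
  //
  // should be coerced to something like the literal struct {double, i64},
  // with the int folded into an integer word, and InReg left false because
  // no float smaller than 64 bits is exposed.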
5854 struct CoerceBuilder {
5855 llvm::LLVMContext &Context;
5856 const llvm::DataLayout &DL;
5857 SmallVector<llvm::Type*, 8> Elems;
5858 uint64_t Size;
5859 bool InReg;
5860
5861 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
5862 : Context(c), DL(dl), Size(0), InReg(false) {}
5863
5864 // Pad Elems with integers until Size is ToSize.
5865 void pad(uint64_t ToSize) {
5866 assert(ToSize >= Size && "Cannot remove elements");
5867 if (ToSize == Size)
5868 return;
5869
5870 // Finish the current 64-bit word.
5871 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
5872 if (Aligned > Size && Aligned <= ToSize) {
5873 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
5874 Size = Aligned;
5875 }
5876
5877 // Add whole 64-bit words.
5878 while (Size + 64 <= ToSize) {
5879 Elems.push_back(llvm::Type::getInt64Ty(Context));
5880 Size += 64;
5881 }
5882
5883 // Final in-word padding.
5884 if (Size < ToSize) {
5885 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
5886 Size = ToSize;
5887 }
5888 }
5889
5890 // Add a floating point element at Offset.
5891 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
5892 // Unaligned floats are treated as integers.
5893 if (Offset % Bits)
5894 return;
5895 // The InReg flag is only required if there are any floats < 64 bits.
5896 if (Bits < 64)
5897 InReg = true;
5898 pad(Offset);
5899 Elems.push_back(Ty);
5900 Size = Offset + Bits;
5901 }
5902
5903 // Add a struct type to the coercion type, starting at Offset (in bits).
5904 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
5905 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
5906 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
5907 llvm::Type *ElemTy = StrTy->getElementType(i);
5908 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
5909 switch (ElemTy->getTypeID()) {
5910 case llvm::Type::StructTyID:
5911 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
5912 break;
5913 case llvm::Type::FloatTyID:
5914 addFloat(ElemOffset, ElemTy, 32);
5915 break;
5916 case llvm::Type::DoubleTyID:
5917 addFloat(ElemOffset, ElemTy, 64);
5918 break;
5919 case llvm::Type::FP128TyID:
5920 addFloat(ElemOffset, ElemTy, 128);
5921 break;
5922 case llvm::Type::PointerTyID:
5923 if (ElemOffset % 64 == 0) {
5924 pad(ElemOffset);
5925 Elems.push_back(ElemTy);
5926 Size += 64;
5927 }
5928 break;
5929 default:
5930 break;
5931 }
5932 }
5933 }
5934
5935 // Check if Ty is a usable substitute for the coercion type.
5936 bool isUsableType(llvm::StructType *Ty) const {
5937 if (Ty->getNumElements() != Elems.size())
5938 return false;
5939 for (unsigned i = 0, e = Elems.size(); i != e; ++i)
5940 if (Elems[i] != Ty->getElementType(i))
5941 return false;
5942 return true;
5943 }
5944
5945 // Get the coercion type as a literal struct type.
5946 llvm::Type *getType() const {
5947 if (Elems.size() == 1)
5948 return Elems.front();
5949 else
5950 return llvm::StructType::get(Context, Elems);
5951 }
5952 };
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00005953};
5954} // end anonymous namespace
5955
5956ABIArgInfo
5957SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
5958 if (Ty->isVoidType())
5959 return ABIArgInfo::getIgnore();
5960
5961 uint64_t Size = getContext().getTypeSize(Ty);
5962
5963 // Anything too big to fit in registers is passed with an explicit indirect
5964 // pointer / sret pointer.
5965 if (Size > SizeLimit)
5966 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5967
5968 // Treat an enum type as its underlying type.
5969 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5970 Ty = EnumTy->getDecl()->getIntegerType();
5971
5972 // Integer types smaller than a register are extended.
5973 if (Size < 64 && Ty->isIntegerType())
5974 return ABIArgInfo::getExtend();
5975
5976 // Other non-aggregates go in registers.
5977 if (!isAggregateTypeForABI(Ty))
5978 return ABIArgInfo::getDirect();
5979
Jakob Stoklund Olesenb81eb3e2014-01-12 06:54:56 +00005980 // If a C++ object has either a non-trivial copy constructor or a non-trivial
5981 // destructor, it is passed with an explicit indirect pointer / sret pointer.
5982 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5983 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5984
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00005985 // This is a small aggregate type that should be passed in registers.
Jakob Stoklund Olesen02dc6a12013-05-28 04:57:37 +00005986 // Build a coercion type from the LLVM struct type.
5987 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
5988 if (!StrTy)
5989 return ABIArgInfo::getDirect();
5990
5991 CoerceBuilder CB(getVMContext(), getDataLayout());
5992 CB.addStruct(0, StrTy);
5993 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
5994
5995 // Try to use the original type for coercion.
5996 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
5997
5998 if (CB.InReg)
5999 return ABIArgInfo::getDirectInReg(CoerceTy);
6000 else
6001 return ABIArgInfo::getDirect(CoerceTy);
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00006002}
6003
6004llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6005 CodeGenFunction &CGF) const {
Jakob Stoklund Olesen303caed2013-06-05 03:00:18 +00006006 ABIArgInfo AI = classifyType(Ty, 16 * 8);
6007 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6008 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6009 AI.setCoerceToType(ArgTy);
6010
6011 llvm::Type *BPP = CGF.Int8PtrPtrTy;
6012 CGBuilderTy &Builder = CGF.Builder;
6013 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
6014 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
6015 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6016 llvm::Value *ArgAddr;
6017 unsigned Stride;
6018
6019 switch (AI.getKind()) {
6020 case ABIArgInfo::Expand:
Reid Kleckner314ef7b2014-02-01 00:04:45 +00006021 case ABIArgInfo::InAlloca:
Jakob Stoklund Olesen303caed2013-06-05 03:00:18 +00006022 llvm_unreachable("Unsupported ABI kind for va_arg");
6023
6024 case ABIArgInfo::Extend:
6025 Stride = 8;
6026 ArgAddr = Builder
6027 .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
6028 "extend");
6029 break;
6030
6031 case ABIArgInfo::Direct:
6032 Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
6033 ArgAddr = Addr;
6034 break;
6035
6036 case ABIArgInfo::Indirect:
6037 Stride = 8;
6038 ArgAddr = Builder.CreateBitCast(Addr,
6039 llvm::PointerType::getUnqual(ArgPtrTy),
6040 "indirect");
6041 ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
6042 break;
6043
6044 case ABIArgInfo::Ignore:
6045 return llvm::UndefValue::get(ArgPtrTy);
6046 }
6047
6048 // Update VAList.
6049 Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
6050 Builder.CreateStore(Addr, VAListAddrAsBPP);
6051
6052 return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00006053}
6054
6055void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6056 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
Aaron Ballmanec47bc22014-03-17 18:10:01 +00006057 for (auto &I : FI.arguments())
6058 I.info = classifyType(I.type, 16 * 8);
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00006059}
6060
6061namespace {
6062class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
6063public:
6064 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
6065 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
Roman Divackyf02c9942014-02-24 18:46:27 +00006066
Craig Topper4f12f102014-03-12 06:41:41 +00006067 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
Roman Divackyf02c9942014-02-24 18:46:27 +00006068 return 14;
6069 }
6070
6071 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
Craig Topper4f12f102014-03-12 06:41:41 +00006072 llvm::Value *Address) const override;
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00006073};
6074} // end anonymous namespace
6075
Roman Divackyf02c9942014-02-24 18:46:27 +00006076bool
6077SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6078 llvm::Value *Address) const {
6079 // This is calculated from the LLVM and GCC tables and verified
6080 // against gcc output. AFAIK all ABIs use the same encoding.
6081
6082 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6083
6084 llvm::IntegerType *i8 = CGF.Int8Ty;
6085 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6086 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6087
6088 // 0-31: the 8-byte general-purpose registers
6089 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
6090
6091 // 32-63: f0-31, the 4-byte floating-point registers
6092 AssignToArrayRange(Builder, Address, Four8, 32, 63);
6093
6094 // Y = 64
6095 // PSR = 65
6096 // WIM = 66
6097 // TBR = 67
6098 // PC = 68
6099 // NPC = 69
6100 // FSR = 70
6101 // CSR = 71
6102 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
6103
6104 // 72-87: d0-15, the 8-byte floating-point registers
6105 AssignToArrayRange(Builder, Address, Eight8, 72, 87);
6106
6107 return false;
6108}
6109
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00006110
Robert Lytton0e076492013-08-13 09:43:10 +00006111//===----------------------------------------------------------------------===//
Robert Lyttond21e2d72014-03-03 13:45:29 +00006112// XCore ABI Implementation
Robert Lytton0e076492013-08-13 09:43:10 +00006113//===----------------------------------------------------------------------===//
Robert Lytton844aeeb2014-05-02 09:33:20 +00006114
Robert Lytton0e076492013-08-13 09:43:10 +00006115namespace {
Robert Lytton844aeeb2014-05-02 09:33:20 +00006116
6117/// A SmallStringEnc instance is used to build up the TypeString by passing
6118/// it by reference between functions that append to it.
6119typedef llvm::SmallString<128> SmallStringEnc;
6120
6121/// TypeStringCache caches the meta encodings of Types.
6122///
6123/// The reason for caching TypeStrings is two fold:
6124/// 1. To cache a type's encoding for later uses;
6125/// 2. As a means to break recursive member type inclusion.
6126///
6127/// A cache Entry can have a Status of:
6128/// NonRecursive: The type encoding is not recursive;
6129/// Recursive: The type encoding is recursive;
6130/// Incomplete: An incomplete TypeString;
6131/// IncompleteUsed: An incomplete TypeString that has been used in a
6132/// Recursive type encoding.
6133///
6134/// A NonRecursive entry will have all of its sub-members expanded as fully
6135/// as possible. Whilst it may contain types which are recursive, the type
6136/// itself is not recursive and thus its encoding may be safely used whenever
6137/// the type is encountered.
6138///
6139/// A Recursive entry will have all of its sub-members expanded as fully as
6140/// possible. The type itself is recursive and it may contain other types which
6141/// are recursive. The Recursive encoding must not be used during the expansion
6142/// of a recursive type's recursive branch. For simplicity the code uses
6143/// IncompleteCount to reject all usage of Recursive encodings for member types.
6144///
6145/// An Incomplete entry is always a RecordType and only encodes its
6146/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
6147/// are placed into the cache during type expansion as a means to identify and
6148/// handle recursive inclusion of types as sub-members. If there is recursion
6149/// the entry becomes IncompleteUsed.
6150///
6151/// During the expansion of a RecordType's members:
6152///
6153/// If the cache contains a NonRecursive encoding for the member type, the
6154/// cached encoding is used;
6155///
6156/// If the cache contains a Recursive encoding for the member type, the
6157/// cached encoding is 'Swapped' out, as it may be incorrect, and...
6158///
6159/// If the member is a RecordType, an Incomplete encoding is placed into the
6160/// cache to break potential recursive inclusion of itself as a sub-member;
6161///
6162/// Once a member RecordType has been expanded, its temporary incomplete
6163/// entry is removed from the cache. If a Recursive encoding was swapped out
6164/// it is swapped back in;
6165///
6166/// If an incomplete entry is used to expand a sub-member, the incomplete
6167/// entry is marked as IncompleteUsed. The cache keeps count of how many
6168/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
6169///
6170/// If a member's encoding is found to be a NonRecursive or Recursive viz:
6171/// IncompleteUsedCount==0, the member's encoding is added to the cache.
6172/// Else the member is part of a recursive type and thus the recursion has
6173/// been exited too soon for the encoding to be correct for the member.
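/// For example, a self-referential C type such as
///
///   struct S { struct S *next; };
///
/// should encode (roughly) as "s(S){m(next){p(s(S){})}}", where the inner
/// "s(S){}" is the Incomplete stub used to break the recursion.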
6174///
6175class TypeStringCache {
6176 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
6177 struct Entry {
6178 std::string Str; // The encoded TypeString for the type.
6179 enum Status State; // Information about the encoding in 'Str'.
6180 std::string Swapped; // A temporary place holder for a Recursive encoding
6181 // during the expansion of RecordType's members.
6182 };
6183 std::map<const IdentifierInfo *, struct Entry> Map;
6184 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
6185 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
6186public:
Robert Lyttond263f142014-05-06 09:38:54 +00006187 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
Robert Lytton844aeeb2014-05-02 09:33:20 +00006188 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
6189 bool removeIncomplete(const IdentifierInfo *ID);
6190 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
6191 bool IsRecursive);
6192 StringRef lookupStr(const IdentifierInfo *ID);
6193};
6194
6195/// TypeString encodings for union fields must be ordered.
6196/// FieldEncoding is a helper for this ordering process.
6197class FieldEncoding {
6198 bool HasName;
6199 std::string Enc;
6200public:
6201 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
6202 StringRef str() { return Enc.c_str(); }
6203 bool operator<(const FieldEncoding &rhs) const {
6204 if (HasName != rhs.HasName) return HasName;
6205 return Enc < rhs.Enc;
6206 }
6207};
6208
Robert Lytton7d1db152013-08-19 09:46:39 +00006209class XCoreABIInfo : public DefaultABIInfo {
6210public:
6211 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
Craig Topper4f12f102014-03-12 06:41:41 +00006212 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6213 CodeGenFunction &CGF) const override;
Robert Lytton7d1db152013-08-19 09:46:39 +00006214};
6215
Robert Lyttond21e2d72014-03-03 13:45:29 +00006216class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
Robert Lytton844aeeb2014-05-02 09:33:20 +00006217 mutable TypeStringCache TSC;
Robert Lytton0e076492013-08-13 09:43:10 +00006218public:
Robert Lyttond21e2d72014-03-03 13:45:29 +00006219 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
Robert Lytton7d1db152013-08-19 09:46:39 +00006220 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
Rafael Espindola8dcd6e72014-05-08 15:01:48 +00006221 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6222 CodeGen::CodeGenModule &M) const override;
Robert Lytton0e076492013-08-13 09:43:10 +00006223};
Robert Lytton844aeeb2014-05-02 09:33:20 +00006224
Robert Lytton2d196952013-10-11 10:29:34 +00006225} // End anonymous namespace.
Robert Lytton0e076492013-08-13 09:43:10 +00006226
Robert Lytton7d1db152013-08-19 09:46:39 +00006227llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6228 CodeGenFunction &CGF) const {
Robert Lytton7d1db152013-08-19 09:46:39 +00006229 CGBuilderTy &Builder = CGF.Builder;
Robert Lytton7d1db152013-08-19 09:46:39 +00006230
Robert Lytton2d196952013-10-11 10:29:34 +00006231 // Get the VAList.
Robert Lytton7d1db152013-08-19 09:46:39 +00006232 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
6233 CGF.Int8PtrPtrTy);
6234 llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
Robert Lytton7d1db152013-08-19 09:46:39 +00006235
Robert Lytton2d196952013-10-11 10:29:34 +00006236 // Handle the argument.
6237 ABIArgInfo AI = classifyArgumentType(Ty);
6238 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6239 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6240 AI.setCoerceToType(ArgTy);
Robert Lytton7d1db152013-08-19 09:46:39 +00006241 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
Robert Lytton2d196952013-10-11 10:29:34 +00006242 llvm::Value *Val;
Andy Gibbsd9ba4722013-10-14 07:02:04 +00006243 uint64_t ArgSize = 0;
Robert Lytton7d1db152013-08-19 09:46:39 +00006244 switch (AI.getKind()) {
Robert Lytton7d1db152013-08-19 09:46:39 +00006245 case ABIArgInfo::Expand:
Reid Kleckner314ef7b2014-02-01 00:04:45 +00006246 case ABIArgInfo::InAlloca:
Robert Lytton7d1db152013-08-19 09:46:39 +00006247 llvm_unreachable("Unsupported ABI kind for va_arg");
6248 case ABIArgInfo::Ignore:
Robert Lytton2d196952013-10-11 10:29:34 +00006249 Val = llvm::UndefValue::get(ArgPtrTy);
6250 ArgSize = 0;
6251 break;
Robert Lytton7d1db152013-08-19 09:46:39 +00006252 case ABIArgInfo::Extend:
6253 case ABIArgInfo::Direct:
Robert Lytton2d196952013-10-11 10:29:34 +00006254 Val = Builder.CreatePointerCast(AP, ArgPtrTy);
6255 ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
6256 if (ArgSize < 4)
6257 ArgSize = 4;
6258 break;
Robert Lytton7d1db152013-08-19 09:46:39 +00006259 case ABIArgInfo::Indirect:
6260 llvm::Value *ArgAddr;
6261 ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
6262 ArgAddr = Builder.CreateLoad(ArgAddr);
Robert Lytton2d196952013-10-11 10:29:34 +00006263 Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
6264 ArgSize = 4;
6265 break;
Robert Lytton7d1db152013-08-19 09:46:39 +00006266 }
Robert Lytton2d196952013-10-11 10:29:34 +00006267
6268 // Increment the VAList.
6269 if (ArgSize) {
6270 llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
6271 Builder.CreateStore(APN, VAListAddrAsBPP);
6272 }
6273 return Val;
Robert Lytton7d1db152013-08-19 09:46:39 +00006274}
Robert Lytton0e076492013-08-13 09:43:10 +00006275
Robert Lytton844aeeb2014-05-02 09:33:20 +00006276/// During the expansion of a RecordType, an incomplete TypeString is placed
6277/// into the cache as a means to identify and break recursion.
6278/// If there is a Recursive encoding in the cache, it is swapped out and will
6279/// be reinserted by removeIncomplete().
6280/// All other types of encoding should have been used rather than arriving here.
6281void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
6282 std::string StubEnc) {
6283 if (!ID)
6284 return;
6285 Entry &E = Map[ID];
6286 assert( (E.Str.empty() || E.State == Recursive) &&
6287 "Incorrectly use of addIncomplete");
6288 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
6289 E.Swapped.swap(E.Str); // swap out the Recursive
6290 E.Str.swap(StubEnc);
6291 E.State = Incomplete;
6292 ++IncompleteCount;
6293}
6294
6295/// Once the RecordType has been expanded, the temporary incomplete TypeString
6296/// must be removed from the cache.
6297/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
6298/// Returns true if the RecordType was defined recursively.
6299bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
6300 if (!ID)
6301 return false;
6302 auto I = Map.find(ID);
6303 assert(I != Map.end() && "Entry not present");
6304 Entry &E = I->second;
6305 assert( (E.State == Incomplete ||
6306 E.State == IncompleteUsed) &&
6307 "Entry must be an incomplete type");
6308 bool IsRecursive = false;
6309 if (E.State == IncompleteUsed) {
6310 // We made use of our Incomplete encoding, thus we are recursive.
6311 IsRecursive = true;
6312 --IncompleteUsedCount;
6313 }
6314 if (E.Swapped.empty())
6315 Map.erase(I);
6316 else {
6317 // Swap the Recursive back.
6318 E.Swapped.swap(E.Str);
6319 E.Swapped.clear();
6320 E.State = Recursive;
6321 }
6322 --IncompleteCount;
6323 return IsRecursive;
6324}
6325
6326/// Add the encoded TypeString to the cache only if it is NonRecursive or
6327/// Recursive (viz: all sub-members were expanded as fully as possible).
6328void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
6329 bool IsRecursive) {
6330 if (!ID || IncompleteUsedCount)
6331 return; // No key or it is an incomplete sub-type so don't add.
6332 Entry &E = Map[ID];
6333 if (IsRecursive && !E.Str.empty()) {
6334 assert(E.State==Recursive && E.Str.size() == Str.size() &&
6335 "This is not the same Recursive entry");
6336 // The parent container was not recursive after all, so we could have used
6337 // this Recursive sub-member entry, but we assumed the worst when
6338 // we started, viz: IncompleteCount != 0.
6339 return;
6340 }
6341 assert(E.Str.empty() && "Entry already present");
6342 E.Str = Str.str();
6343 E.State = IsRecursive? Recursive : NonRecursive;
6344}
6345
6346/// Return a cached TypeString encoding for the ID. If there isn't one, or we
6347/// are recursively expanding a type (IncompleteCount != 0) and the cached
6348/// encoding is Recursive, return an empty StringRef.
6349StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
6350 if (!ID)
6351 return StringRef(); // We have no key.
6352 auto I = Map.find(ID);
6353 if (I == Map.end())
6354 return StringRef(); // We have no encoding.
6355 Entry &E = I->second;
6356 if (E.State == Recursive && IncompleteCount)
6357 return StringRef(); // We don't use Recursive encodings for member types.
6358
6359 if (E.State == Incomplete) {
6360 // The incomplete type is being used to break out of recursion.
6361 E.State = IncompleteUsed;
6362 ++IncompleteUsedCount;
6363 }
6364 return E.Str.c_str();
6365}
6366
6367/// The XCore ABI includes a type information section that communicates symbol
6368/// type information to the linker. The linker uses this information to verify
6369/// safety/correctness of things such as array bounds and pointers.
6370/// The ABI only requires C (and XC) language modules to emit TypeStrings.
6371/// This type information (TypeString) is emitted into metadata for all global
6372/// symbols: definitions, declarations, functions & variables.
6373///
6374/// The TypeString carries type, qualifier, name, size & value details.
6375/// Please see 'Tools Development Guide' section 2.16.2 for format details:
6376/// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf>
6377/// The output is tested by test/CodeGen/xcore-stringtype.c.
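/// As a small example of the expected shape (the test above is authoritative),
/// a C declaration such as 'int f(int);' should be encoded as "f{si}(si)" and
/// attached to the corresponding global in the "xcore.typestrings" named
/// metadata.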
6378///
6379static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6380 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
6381
6382/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
6383void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6384 CodeGen::CodeGenModule &CGM) const {
6385 SmallStringEnc Enc;
6386 if (getTypeString(Enc, D, CGM, TSC)) {
6387 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
6388 llvm::SmallVector<llvm::Value *, 2> MDVals;
6389 MDVals.push_back(GV);
6390 MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
6391 llvm::NamedMDNode *MD =
6392 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
6393 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6394 }
6395}
6396
6397static bool appendType(SmallStringEnc &Enc, QualType QType,
6398 const CodeGen::CodeGenModule &CGM,
6399 TypeStringCache &TSC);
6400
6401/// Helper function for appendRecordType().
6402/// Builds a SmallVector containing the encoded field types in declaration order.
6403static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
6404 const RecordDecl *RD,
6405 const CodeGen::CodeGenModule &CGM,
6406 TypeStringCache &TSC) {
6407 for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
6408 I != E; ++I) {
6409 SmallStringEnc Enc;
6410 Enc += "m(";
6411 Enc += I->getName();
6412 Enc += "){";
6413 if (I->isBitField()) {
6414 Enc += "b(";
6415 llvm::raw_svector_ostream OS(Enc);
6416 OS.resync();
6417 OS << I->getBitWidthValue(CGM.getContext());
6418 OS.flush();
6419 Enc += ':';
6420 }
6421 if (!appendType(Enc, I->getType(), CGM, TSC))
6422 return false;
6423 if (I->isBitField())
6424 Enc += ')';
6425 Enc += '}';
6426 FE.push_back(FieldEncoding(!I->getName().empty(), Enc));
6427 }
6428 return true;
6429}
6430
6431/// Appends structure and union types to Enc and adds encoding to cache.
6432/// Recursively calls appendType (via extractFieldType) for each field.
6433/// Union types have their fields ordered according to the ABI.
6434static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
6435 const CodeGen::CodeGenModule &CGM,
6436 TypeStringCache &TSC, const IdentifierInfo *ID) {
6437 // Append the cached TypeString if we have one.
6438 StringRef TypeString = TSC.lookupStr(ID);
6439 if (!TypeString.empty()) {
6440 Enc += TypeString;
6441 return true;
6442 }
6443
6444 // Start to emit an incomplete TypeString.
6445 size_t Start = Enc.size();
6446 Enc += (RT->isUnionType()? 'u' : 's');
6447 Enc += '(';
6448 if (ID)
6449 Enc += ID->getName();
6450 Enc += "){";
6451
6452 // We collect all encoded fields and order as necessary.
6453 bool IsRecursive = false;
6454 SmallVector<FieldEncoding, 16> FE;
6455 const RecordDecl *RD = RT->getDecl()->getDefinition();
6456 if (RD && !RD->field_empty()) {
6457 // An incomplete TypeString stub is placed in the cache for this RecordType
6458 // so that recursive calls to this RecordType will use it whilst building a
6459 // complete TypeString for this RecordType.
6460 std::string StubEnc(Enc.substr(Start).str());
6461 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
6462 TSC.addIncomplete(ID, std::move(StubEnc));
6463 if (!extractFieldType(FE, RD, CGM, TSC)) {
6464 (void) TSC.removeIncomplete(ID);
6465 return false;
6466 }
6467 IsRecursive = TSC.removeIncomplete(ID);
6468 // The ABI requires unions to be sorted but not structures.
6469 // See FieldEncoding::operator< for sort algorithm.
6470 if (RT->isUnionType())
6471 std::sort(FE.begin(), FE.end());
6472 }
6473
6474 // We can now complete the TypeString.
6475 if (unsigned E = FE.size())
6476 for (unsigned I = 0; I != E; ++I) {
6477 if (I)
6478 Enc += ',';
6479 Enc += FE[I].str();
6480 }
6481 Enc += '}';
6482 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
6483 return true;
6484}
6485
6486/// Appends enum types to Enc and adds the encoding to the cache.
6487static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
6488 TypeStringCache &TSC,
6489 const IdentifierInfo *ID) {
6490 // Append the cached TypeString if we have one.
6491 StringRef TypeString = TSC.lookupStr(ID);
6492 if (!TypeString.empty()) {
6493 Enc += TypeString;
6494 return true;
6495 }
6496
6497 size_t Start = Enc.size();
6498 Enc += "e(";
6499 if (ID)
6500 Enc += ID->getName();
6501 Enc += "){";
6502 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
6503 auto I = ED->enumerator_begin();
6504 auto E = ED->enumerator_end();
6505 while (I != E) {
6506 Enc += "m(";
6507 Enc += I->getName();
6508 Enc += "){";
6509 I->getInitVal().toString(Enc);
6510 Enc += '}';
6511 ++I;
6512 if (I != E)
6513 Enc += ',';
6514 }
6515 }
6516 Enc += '}';
6517 TSC.addIfComplete(ID, Enc.substr(Start), false);
6518 return true;
6519}
6520
6521/// Appends type's qualifier to Enc.
6522/// This is done prior to appending the type's encoding.
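/// For example, a 'const volatile int' should come out as "cv:si": the
/// qualifier prefix from the table below, then the builtin encoding.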
6523static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
6524 // Qualifiers are emitted in alphabetical order.
6525 static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
6526 int Lookup = 0;
6527 if (QT.isConstQualified())
6528 Lookup += 1<<0;
6529 if (QT.isRestrictQualified())
6530 Lookup += 1<<1;
6531 if (QT.isVolatileQualified())
6532 Lookup += 1<<2;
6533 Enc += Table[Lookup];
6534}
6535
6536/// Appends built-in types to Enc.
6537static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
6538 const char *EncType;
6539 switch (BT->getKind()) {
6540 case BuiltinType::Void:
6541 EncType = "0";
6542 break;
6543 case BuiltinType::Bool:
6544 EncType = "b";
6545 break;
6546 case BuiltinType::Char_U:
6547 EncType = "uc";
6548 break;
6549 case BuiltinType::UChar:
6550 EncType = "uc";
6551 break;
6552 case BuiltinType::SChar:
6553 EncType = "sc";
6554 break;
6555 case BuiltinType::UShort:
6556 EncType = "us";
6557 break;
6558 case BuiltinType::Short:
6559 EncType = "ss";
6560 break;
6561 case BuiltinType::UInt:
6562 EncType = "ui";
6563 break;
6564 case BuiltinType::Int:
6565 EncType = "si";
6566 break;
6567 case BuiltinType::ULong:
6568 EncType = "ul";
6569 break;
6570 case BuiltinType::Long:
6571 EncType = "sl";
6572 break;
6573 case BuiltinType::ULongLong:
6574 EncType = "ull";
6575 break;
6576 case BuiltinType::LongLong:
6577 EncType = "sll";
6578 break;
6579 case BuiltinType::Float:
6580 EncType = "ft";
6581 break;
6582 case BuiltinType::Double:
6583 EncType = "d";
6584 break;
6585 case BuiltinType::LongDouble:
6586 EncType = "ld";
6587 break;
6588 default:
6589 return false;
6590 }
6591 Enc += EncType;
6592 return true;
6593}
6594
6595/// Appends a pointer encoding to Enc before calling appendType for the pointee.
6596static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
6597 const CodeGen::CodeGenModule &CGM,
6598 TypeStringCache &TSC) {
6599 Enc += "p(";
6600 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
6601 return false;
6602 Enc += ')';
6603 return true;
6604}
6605
6606/// Appends array encoding to Enc before calling appendType for the element.
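/// For example, 'int a[10]' should encode as "a(10:si)"; an array of unknown
/// size uses NoSizeEnc in place of the count (e.g. "a(*:si)" for globals).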
6607static bool appendArrayType(SmallStringEnc &Enc, const ArrayType *AT,
6608 const CodeGen::CodeGenModule &CGM,
6609 TypeStringCache &TSC, StringRef NoSizeEnc) {
6610 if (AT->getSizeModifier() != ArrayType::Normal)
6611 return false;
6612 Enc += "a(";
6613 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
6614 CAT->getSize().toStringUnsigned(Enc);
6615 else
6616 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
6617 Enc += ':';
6618 if (!appendType(Enc, AT->getElementType(), CGM, TSC))
6619 return false;
6620 Enc += ')';
6621 return true;
6622}
6623
6624/// Appends a function encoding to Enc, calling appendType for the return type
6625/// and the arguments.
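/// For example, 'void g(void)' should encode as "f{0}(0)", and a variadic
/// 'int h(int, ...)' as "f{si}(si,va)".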
6626static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
6627 const CodeGen::CodeGenModule &CGM,
6628 TypeStringCache &TSC) {
6629 Enc += "f{";
6630 if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
6631 return false;
6632 Enc += "}(";
6633 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
6634 // N.B. we are only interested in the adjusted param types.
6635 auto I = FPT->param_type_begin();
6636 auto E = FPT->param_type_end();
6637 if (I != E) {
6638 do {
6639 if (!appendType(Enc, *I, CGM, TSC))
6640 return false;
6641 ++I;
6642 if (I != E)
6643 Enc += ',';
6644 } while (I != E);
6645 if (FPT->isVariadic())
6646 Enc += ",va";
6647 } else {
6648 if (FPT->isVariadic())
6649 Enc += "va";
6650 else
6651 Enc += '0';
6652 }
6653 }
6654 Enc += ')';
6655 return true;
6656}
6657
6658/// Handles the type's qualifier before dispatching a call to handle specific
6659/// type encodings.
6660static bool appendType(SmallStringEnc &Enc, QualType QType,
6661 const CodeGen::CodeGenModule &CGM,
6662 TypeStringCache &TSC) {
6663
6664 QualType QT = QType.getCanonicalType();
6665
6666 appendQualifier(Enc, QT);
6667
6668 if (const BuiltinType *BT = QT->getAs<BuiltinType>())
6669 return appendBuiltinType(Enc, BT);
6670
6671 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
6672 return appendArrayType(Enc, AT, CGM, TSC, "");
6673
6674 if (const PointerType *PT = QT->getAs<PointerType>())
6675 return appendPointerType(Enc, PT, CGM, TSC);
6676
6677 if (const EnumType *ET = QT->getAs<EnumType>())
6678 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
6679
6680 if (const RecordType *RT = QT->getAsStructureType())
6681 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
6682
6683 if (const RecordType *RT = QT->getAsUnionType())
6684 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
6685
6686 if (const FunctionType *FT = QT->getAs<FunctionType>())
6687 return appendFunctionType(Enc, FT, CGM, TSC);
6688
6689 return false;
6690}
6691
6692static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6693 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
6694 if (!D)
6695 return false;
6696
6697 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
6698 if (FD->getLanguageLinkage() != CLanguageLinkage)
6699 return false;
6700 return appendType(Enc, FD->getType(), CGM, TSC);
6701 }
6702
6703 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
6704 if (VD->getLanguageLinkage() != CLanguageLinkage)
6705 return false;
6706 QualType QT = VD->getType().getCanonicalType();
6707 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
6708 // Global ArrayTypes are given a size of '*' if the size is unknown.
6709 appendQualifier(Enc, QT);
6710 return appendArrayType(Enc, AT, CGM, TSC, "*");
6711 }
6712 return appendType(Enc, QT, CGM, TSC);
6713 }
6714 return false;
6715}
6716
6717
Robert Lytton0e076492013-08-13 09:43:10 +00006718//===----------------------------------------------------------------------===//
6719// Driver code
6720//===----------------------------------------------------------------------===//
6721
Chris Lattner2b037972010-07-29 02:01:43 +00006722const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00006723 if (TheTargetCodeGenInfo)
6724 return *TheTargetCodeGenInfo;
Anton Korobeynikov244360d2009-06-05 22:08:42 +00006725
John McCallc8e01702013-04-16 22:48:15 +00006726 const llvm::Triple &Triple = getTarget().getTriple();
Daniel Dunbar40165182009-08-24 09:10:05 +00006727 switch (Triple.getArch()) {
Daniel Dunbare3532f82009-08-24 08:52:16 +00006728 default:
Chris Lattner2b037972010-07-29 02:01:43 +00006729 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
Daniel Dunbare3532f82009-08-24 08:52:16 +00006730
Derek Schuff09338a22012-09-06 17:37:28 +00006731 case llvm::Triple::le32:
6732 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
John McCall943fae92010-05-27 06:19:26 +00006733 case llvm::Triple::mips:
6734 case llvm::Triple::mipsel:
Akira Hatanakac4baedd2013-11-11 22:10:46 +00006735 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
6736
Akira Hatanakaec11b4f2011-09-20 18:30:57 +00006737 case llvm::Triple::mips64:
6738 case llvm::Triple::mips64el:
Akira Hatanakac4baedd2013-11-11 22:10:46 +00006739 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
6740
James Molloy7f4ba532014-04-23 10:26:08 +00006741 case llvm::Triple::arm64:
6742 case llvm::Triple::arm64_be: {
Tim Northovera2ee4332014-03-29 15:09:45 +00006743 ARM64ABIInfo::ABIKind Kind = ARM64ABIInfo::AAPCS;
6744 if (strcmp(getTarget().getABI(), "darwinpcs") == 0)
6745 Kind = ARM64ABIInfo::DarwinPCS;
6746
6747 return *(TheTargetCodeGenInfo = new ARM64TargetCodeGenInfo(Types, Kind));
6748 }
6749
Tim Northover9bb857a2013-01-31 12:13:10 +00006750 case llvm::Triple::aarch64:
Christian Pirker9b019ae2014-02-25 13:51:00 +00006751 case llvm::Triple::aarch64_be:
Tim Northover9bb857a2013-01-31 12:13:10 +00006752 return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types));
6753
Daniel Dunbard59655c2009-09-12 00:59:49 +00006754 case llvm::Triple::arm:
Christian Pirkerf01cd6f2014-03-28 14:40:46 +00006755 case llvm::Triple::armeb:
Daniel Dunbard59655c2009-09-12 00:59:49 +00006756 case llvm::Triple::thumb:
Christian Pirkerf01cd6f2014-03-28 14:40:46 +00006757 case llvm::Triple::thumbeb:
Sandeep Patel45df3dd2011-04-05 00:23:47 +00006758 {
6759 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
John McCallc8e01702013-04-16 22:48:15 +00006760 if (strcmp(getTarget().getABI(), "apcs-gnu") == 0)
Sandeep Patel45df3dd2011-04-05 00:23:47 +00006761 Kind = ARMABIInfo::APCS;
David Tweed8f676532012-10-25 13:33:01 +00006762 else if (CodeGenOpts.FloatABI == "hard" ||
John McCallc8e01702013-04-16 22:48:15 +00006763 (CodeGenOpts.FloatABI != "soft" &&
6764 Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
Sandeep Patel45df3dd2011-04-05 00:23:47 +00006765 Kind = ARMABIInfo::AAPCS_VFP;
6766
Derek Schuffa2020962012-10-16 22:30:41 +00006767 switch (Triple.getOS()) {
Eli Benderskyd7c92032012-12-04 18:38:10 +00006768 case llvm::Triple::NaCl:
Derek Schuffa2020962012-10-16 22:30:41 +00006769 return *(TheTargetCodeGenInfo =
6770 new NaClARMTargetCodeGenInfo(Types, Kind));
6771 default:
6772 return *(TheTargetCodeGenInfo =
6773 new ARMTargetCodeGenInfo(Types, Kind));
6774 }
Sandeep Patel45df3dd2011-04-05 00:23:47 +00006775 }
Daniel Dunbard59655c2009-09-12 00:59:49 +00006776
John McCallea8d8bb2010-03-11 00:10:12 +00006777 case llvm::Triple::ppc:
Chris Lattner2b037972010-07-29 02:01:43 +00006778 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
Roman Divackyd966e722012-05-09 18:22:46 +00006779 case llvm::Triple::ppc64:
Bill Schmidt25cb3492012-10-03 19:18:57 +00006780 if (Triple.isOSBinFormatELF())
6781 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
6782 else
6783 return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
Bill Schmidt778d3872013-07-26 01:36:11 +00006784 case llvm::Triple::ppc64le:
6785 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
6786 return *(TheTargetCodeGenInfo = new PPC64_SVR4_TargetCodeGenInfo(Types));
John McCallea8d8bb2010-03-11 00:10:12 +00006787
Peter Collingbournec947aae2012-05-20 23:28:41 +00006788 case llvm::Triple::nvptx:
6789 case llvm::Triple::nvptx64:
Justin Holewinski83e96682012-05-24 17:43:12 +00006790 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
Justin Holewinskibd4a3c02011-04-22 11:10:38 +00006791
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00006792 case llvm::Triple::msp430:
Chris Lattner2b037972010-07-29 02:01:43 +00006793 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
Daniel Dunbard59655c2009-09-12 00:59:49 +00006794
Ulrich Weigand47445072013-05-06 16:26:41 +00006795 case llvm::Triple::systemz:
6796 return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
6797
Peter Collingbourneadcf7c92011-10-13 16:24:41 +00006798 case llvm::Triple::tce:
6799 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
6800
Eli Friedman33465822011-07-08 23:31:17 +00006801 case llvm::Triple::x86: {
John McCall1fe2a8c2013-06-18 02:46:29 +00006802 bool IsDarwinVectorABI = Triple.isOSDarwin();
6803 bool IsSmallStructInRegABI =
6804 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
Saleem Abdulrasool377066a2014-03-27 22:50:18 +00006805 bool IsWin32FloatStructABI = Triple.isWindowsMSVCEnvironment();
Daniel Dunbar14ad22f2011-04-19 21:43:27 +00006806
John McCall1fe2a8c2013-06-18 02:46:29 +00006807 if (Triple.getOS() == llvm::Triple::Win32) {
Eli Friedmana98d1f82012-01-25 22:46:34 +00006808 return *(TheTargetCodeGenInfo =
Reid Klecknere43f0fe2013-05-08 13:44:39 +00006809 new WinX86_32TargetCodeGenInfo(Types,
John McCall1fe2a8c2013-06-18 02:46:29 +00006810 IsDarwinVectorABI, IsSmallStructInRegABI,
6811 IsWin32FloatStructABI,
Reid Klecknere43f0fe2013-05-08 13:44:39 +00006812 CodeGenOpts.NumRegisterParameters));
John McCall1fe2a8c2013-06-18 02:46:29 +00006813 } else {
Anton Korobeynikov55bcea12010-01-10 12:58:08 +00006814 return *(TheTargetCodeGenInfo =
John McCall1fe2a8c2013-06-18 02:46:29 +00006815 new X86_32TargetCodeGenInfo(Types,
6816 IsDarwinVectorABI, IsSmallStructInRegABI,
6817 IsWin32FloatStructABI,
Rafael Espindola06b2b4a2012-07-31 02:44:24 +00006818 CodeGenOpts.NumRegisterParameters));
Anton Korobeynikov244360d2009-06-05 22:08:42 +00006819 }
Eli Friedman33465822011-07-08 23:31:17 +00006820 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00006821
Eli Friedmanbfd5add2011-12-02 00:11:43 +00006822 case llvm::Triple::x86_64: {
John McCallc8e01702013-04-16 22:48:15 +00006823 bool HasAVX = strcmp(getTarget().getABI(), "avx") == 0;
Eli Friedmanbfd5add2011-12-02 00:11:43 +00006824
Chris Lattner04dc9572010-08-31 16:44:54 +00006825 switch (Triple.getOS()) {
6826 case llvm::Triple::Win32:
NAKAMURA Takumi31ea2f12011-02-17 08:51:38 +00006827 case llvm::Triple::MinGW32:
Chris Lattner04dc9572010-08-31 16:44:54 +00006828 case llvm::Triple::Cygwin:
6829 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
Eli Benderskyd7c92032012-12-04 18:38:10 +00006830 case llvm::Triple::NaCl:
John McCallc8e01702013-04-16 22:48:15 +00006831 return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
6832 HasAVX));
Chris Lattner04dc9572010-08-31 16:44:54 +00006833 default:
Eli Friedmanbfd5add2011-12-02 00:11:43 +00006834 return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
6835 HasAVX));
Chris Lattner04dc9572010-08-31 16:44:54 +00006836 }
Daniel Dunbare3532f82009-08-24 08:52:16 +00006837 }
Tony Linthicum76329bf2011-12-12 21:14:55 +00006838 case llvm::Triple::hexagon:
6839 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
Jakob Stoklund Olesend28ab7e2013-05-27 21:48:25 +00006840 case llvm::Triple::sparcv9:
6841 return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
Robert Lytton0e076492013-08-13 09:43:10 +00006842 case llvm::Triple::xcore:
Robert Lyttond21e2d72014-03-03 13:45:29 +00006843 return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
Eli Friedmanbfd5add2011-12-02 00:11:43 +00006844 }
Anton Korobeynikov244360d2009-06-05 22:08:42 +00006845}