//=- AArch64CallingConv.td - Calling Conventions for AArch64 -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the AArch64 architecture.
//
//===----------------------------------------------------------------------===//

/// CCIfAlign - Match if the original alignment of the argument is Align.
class CCIfAlign<string Align, CCAction A> :
  CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
/// CCIfBigEndian - Match only if we're in big endian mode.
class CCIfBigEndian<CCAction A> :
  CCIf<"State.getMachineFunction().getSubtarget().getDataLayout()->isBigEndian()", A>;

//===----------------------------------------------------------------------===//
// ARM AAPCS64 Calling Convention
//===----------------------------------------------------------------------===//

def CC_AArch64_AAPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // An i128 is split into two i64s; we can't fit half of it into register X7.
  CCIfType<[i64], CCIfSplit<CCAssignToRegWithShadow<[X0, X2, X4, X6],
                                                    [X0, X1, X3, X5]>>>,
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more arguments arrive than will fit in registers, pass them on the
  // stack instead.
  CCIfType<[i1, i8, i16, f16], CCAssignToStack<8, 8>>,
  CCIfType<[i32, f32], CCAssignToStack<8, 8>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;
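
// A hedged sketch of how the rules above compose; the C declaration below is
// hypothetical and only used for illustration:
//
//   void f(int a, __int128 b, double c);
//
// Under CC_AArch64_AAPCS, `a` is assigned W0 (shadowing X0); `b` is split into
// two i64s and takes the even-aligned pair X2/X3 (the split rule's shadow list
// marks X1 as used so the pair starts at an even register); and `c` goes in D0.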

def RetCC_AArch64_AAPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,

  // Big endian vectors must be passed as if they were 1-element vectors so that
  // their lanes are in a consistent order.
  CCIfBigEndian<CCIfType<[v2i32, v2f32, v4i16, v4f16, v8i8],
                         CCBitConvertToType<f64>>>,
  CCIfBigEndian<CCIfType<[v2i64, v2f64, v4i32, v4f32, v8i16, v8f16, v16i8],
                         CCBitConvertToType<f128>>>,

  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;
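
// A brief, hedged sketch of what the return rules above imply: a function
// returning a plain i64 gets its value back in X0, an f64 comes back in D0,
// and a 128-bit vector such as v4f32 is first bitconverted to v2i64 and then
// returned in Q0.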

// Darwin uses a calling convention which differs in only two ways
// from the standard one at this level:
//   + i128s (i.e. split i64s) don't need even registers.
//   + Stack slots are sized as needed rather than being at least 64-bit.
def CC_AArch64_DarwinPCS : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  // An SRet is passed in X8, not X0 like a normal pointer parameter.
  CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[X8], [W8]>>>,

  // Put ByVal arguments directly on the stack. Minimum size and alignment of a
  // slot is 64-bit.
  CCIfByVal<CCPassByVal<8, 8>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,

  // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
  // up to eight each of GPR and FPR.
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  // An i128 is split into two i64s; we can't fit half of it into register X7.
  CCIfType<[i64],
           CCIfSplit<CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6],
                                             [W0, W1, W2, W3, W4, W5, W6]>>>,
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStackWithShadow<8, 16, [X7]>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f16], CCAssignToRegWithShadow<[H0, H1, H2, H3, H4, H5, H6, H7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                   [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,

  // If more arguments arrive than will fit in registers, pass them on the
  // stack instead. Stack slots are sized to the argument rather than padded
  // to 64 bits.
  CCIf<"ValVT == MVT::i1 || ValVT == MVT::i8", CCAssignToStack<1, 1>>,
  CCIf<"ValVT == MVT::i16 || ValVT == MVT::f16", CCAssignToStack<2, 2>>,
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64, v1f64, v2f32, v1i64, v2i32, v4i16, v8i8, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;
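
// A hedged sketch of how this differs from CC_AArch64_AAPCS, using a
// hypothetical declaration:
//
//   void g(long a, __int128 b);
//
// Under the generic AAPCS rules, `b` must start at an even register and so
// occupies X2/X3, leaving X1 unused; under CC_AArch64_DarwinPCS it simply takes
// the next two free registers, X1/X2. Likewise, once the GPRs are exhausted, a
// `char` argument takes a 1-byte stack slot here instead of the 8-byte slot the
// generic rules would give it.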

def CC_AArch64_DarwinPCS_VarArg : CallingConv<[
  CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
  CCIfType<[v2f64, v4f32, f128], CCBitConvertToType<v2i64>>,

  CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Stack_Block">>,

  // Handle all scalar types as either i64 or f64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
  CCIfType<[f16, f32],     CCPromoteToType<f64>>,

  // Everything is on the stack.
  // An i128 is split into two i64s, and its stack alignment is 16 bytes.
  CCIfType<[i64], CCIfSplit<CCAssignToStack<8, 16>>>,
  CCIfType<[i64, f64, v1i64, v2i32, v4i16, v8i8, v1f64, v2f32, v4f16],
           CCAssignToStack<8, 8>>,
  CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
           CCAssignToStack<16, 16>>
]>;
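
// A hedged illustration: for the anonymous part of a variadic Darwin call such
// as `printf("%d %f", i, d)`, the `int` argument is promoted to i64, the
// `double` stays f64, and each takes an 8-byte, 8-byte-aligned stack slot; no
// anonymous argument is assigned a register by this convention.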

// The WebKit_JS calling convention only passes the first argument (the callee)
// in a register; the remaining arguments go on the stack. We allow 32-bit
// stack slots so that WebKit can write partial values in the stack and define
// the other 32-bit quantity as undef.
def CC_AArch64_WebKit_JS : CallingConv<[
  // Handle i1, i8, i16, i32, and i64 passing in register X0 (W0).
  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
  CCIfType<[i32], CCAssignToRegWithShadow<[W0], [X0]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0], [W0]>>,

  // Pass the remaining arguments on the stack instead.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;
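
// A minimal, hedged sketch of the effect: for a WebKit_JS call with arguments
// (callee, a, b), only the first i64 argument (the callee) is assigned X0;
// every later argument is placed on the stack, in a 4-byte slot if it is
// i32/f32 and an 8-byte slot if it is i64/f64.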

def RetCC_AArch64_WebKit_JS : CallingConv<[
  CCIfType<[i32], CCAssignToRegWithShadow<[W0, W1, W2, W3, W4, W5, W6, W7],
                                          [X0, X1, X2, X3, X4, X5, X6, X7]>>,
  CCIfType<[i64], CCAssignToRegWithShadow<[X0, X1, X2, X3, X4, X5, X6, X7],
                                          [W0, W1, W2, W3, W4, W5, W6, W7]>>,
  CCIfType<[f32], CCAssignToRegWithShadow<[S0, S1, S2, S3, S4, S5, S6, S7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
  CCIfType<[f64], CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                                          [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
]>;

// FIXME: LR is only callee-saved in the sense that *we* preserve it and are
// presumably a callee to someone. External functions may not do so, but this
// is currently safe since BL has LR as an implicit-def and what happens after a
// tail call doesn't matter.
//
// It would be better to model its preservation semantics properly (create a
// vreg on entry, use it in RET & tail call generation; make that vreg def if we
// end up saving LR as part of a call frame). Watch this space...
def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
                                           X23, X24, X25, X26, X27, X28,
                                           D8,  D9,  D10, D11,
                                           D12, D13, D14, D15)>;
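
// Only D8-D15 appear in the list above, so just the low 64 bits of the
// corresponding Q8-Q15 registers are preserved across an AAPCS call;
// registers not listed (X0-X18, Q0-Q7, Q16-Q31, and the high halves of
// Q8-Q15) are treated as clobbered.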

// Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
// 'this' and the pointer return value are both passed in X0 in these cases,
// this can be partially modelled by treating X0 as a callee-saved register;
// only the resulting RegMask is used; the SaveList is ignored.
//
// (For generic ARM 64-bit ABI code, clang will not generate constructors or
// destructors with 'this' returns, so this RegMask will not be used in that
// case.)
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;

// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. X16 and X17 are used on the
// fast path for calculation, but all other registers except X0 (argument and
// return value) and LR (it is a call, after all) are preserved.
def CSR_AArch64_TLS_Darwin
    : CalleeSavedRegs<(add (sub (sequence "X%u", 1, 28), X16, X17),
                           FP,
                           (sequence "Q%u", 0, 31))>;

// The ELF stub used for TLS-descriptor access saves every feasible
// register. Only X0 and LR are clobbered.
def CSR_AArch64_TLS_ELF
    : CalleeSavedRegs<(add (sequence "X%u", 1, 28), FP,
                           (sequence "Q%u", 0, 31))>;

def CSR_AArch64_AllRegs
    : CalleeSavedRegs<(add (sequence "W%u", 0, 30), WSP,
                           (sequence "X%u", 0, 28), FP, LR, SP,
                           (sequence "B%u", 0, 31), (sequence "H%u", 0, 31),
                           (sequence "S%u", 0, 31), (sequence "D%u", 0, 31),
                           (sequence "Q%u", 0, 31))>;