//===- ARMInstrNEON.td - NEON support for ARM -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM NEON instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// NEON-specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTARMVCMP : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<1, 2>]>;

def NEONvceq : SDNode<"ARMISD::VCEQ", SDTARMVCMP>;
def NEONvcge : SDNode<"ARMISD::VCGE", SDTARMVCMP>;
def NEONvcgeu : SDNode<"ARMISD::VCGEU", SDTARMVCMP>;
def NEONvcgt : SDNode<"ARMISD::VCGT", SDTARMVCMP>;
def NEONvcgtu : SDNode<"ARMISD::VCGTU", SDTARMVCMP>;
def NEONvtst : SDNode<"ARMISD::VTST", SDTARMVCMP>;

// Types for vector shift by immediates. The "SHX" version is for long and
// narrow operations where the source and destination vectors have different
// types. The "SHINS" version is for shift and insert operations.
def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                     SDTCisVT<2, i32>]>;
def SDTARMVSHX : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                      SDTCisVT<2, i32>]>;
def SDTARMVSHINS : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                        SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;
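// For example, the NEONvshrn node defined below uses SDTARMVSHX: its result
// and source vectors have different types (e.g. a v8i8 result narrowed from a
// v8i16 operand), and the final i32 operand is the immediate shift amount.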

def NEONvshl : SDNode<"ARMISD::VSHL", SDTARMVSH>;
def NEONvshrs : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
def NEONvshru : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
def NEONvshlls : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
def NEONvshllu : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
def NEONvshlli : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
def NEONvshrn : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;

def NEONvrshrs : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
def NEONvrshru : SDNode<"ARMISD::VRSHRu", SDTARMVSH>;
def NEONvrshrn : SDNode<"ARMISD::VRSHRN", SDTARMVSHX>;

def NEONvqshls : SDNode<"ARMISD::VQSHLs", SDTARMVSH>;
def NEONvqshlu : SDNode<"ARMISD::VQSHLu", SDTARMVSH>;
def NEONvqshlsu : SDNode<"ARMISD::VQSHLsu", SDTARMVSH>;
def NEONvqshrns : SDNode<"ARMISD::VQSHRNs", SDTARMVSHX>;
def NEONvqshrnu : SDNode<"ARMISD::VQSHRNu", SDTARMVSHX>;
def NEONvqshrnsu : SDNode<"ARMISD::VQSHRNsu", SDTARMVSHX>;

def NEONvqrshrns : SDNode<"ARMISD::VQRSHRNs", SDTARMVSHX>;
def NEONvqrshrnu : SDNode<"ARMISD::VQRSHRNu", SDTARMVSHX>;
def NEONvqrshrnsu : SDNode<"ARMISD::VQRSHRNsu", SDTARMVSHX>;

def NEONvsli : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
def NEONvsri : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;

def SDTARMVGETLN : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                        SDTCisVT<2, i32>]>;
def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;

def NEONvduplaneq : SDNode<"ARMISD::VDUPLANEQ",
                           SDTypeProfile<1, 2, [SDTCisVT<2, i32>]>>;

//===----------------------------------------------------------------------===//
// NEON operand definitions
//===----------------------------------------------------------------------===//

// addrmode_neonldstm := reg
//
/* TODO: Take advantage of vldm.
def addrmode_neonldstm : Operand<i32>,
                         ComplexPattern<i32, 2, "SelectAddrModeNeonLdStM", []> {
  let PrintMethod = "printAddrNeonLdStMOperand";
  let MIOperandInfo = (ops GPR, i32imm);
}
*/

//===----------------------------------------------------------------------===//
// NEON load / store instructions
//===----------------------------------------------------------------------===//

/* TODO: Take advantage of vldm.
let mayLoad = 1 in {
def VLDMD : NI<(outs),
               (ins addrmode_neonldstm:$addr, reglist:$dst1, variable_ops),
               "vldm${addr:submode} ${addr:base}, $dst1",
               []>;

def VLDMS : NI<(outs),
               (ins addrmode_neonldstm:$addr, reglist:$dst1, variable_ops),
               "vldm${addr:submode} ${addr:base}, $dst1",
               []>;
}
*/

// Use vldmia to load a Q register as a D register pair.
def VLDRQ : NI<(outs QPR:$dst), (ins GPR:$addr),
               "vldmia $addr, ${dst:dregpair}",
               [(set QPR:$dst, (v2f64 (load GPR:$addr)))]>;

// Use vstmia to store a Q register as a D register pair.
def VSTRQ : NI<(outs), (ins QPR:$src, GPR:$addr),
               "vstmia $addr, ${src:dregpair}",
               [(store (v2f64 QPR:$src), GPR:$addr)]>;


// VLD1 : Vector Load (multiple single elements)
class VLD1D<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs DPR:$dst), (ins addrmode6:$addr),
          !strconcat(OpcodeStr, "\t${dst:dregsingle}, $addr"),
          [(set DPR:$dst, (Ty (IntOp addrmode6:$addr, 1)))]>;
class VLD1Q<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs QPR:$dst), (ins addrmode6:$addr),
          !strconcat(OpcodeStr, "\t${dst:dregpair}, $addr"),
          [(set QPR:$dst, (Ty (IntOp addrmode6:$addr, 1)))]>;

def VLD1d8  : VLD1D<"vld1.8",  v8i8,  int_arm_neon_vldi>;
def VLD1d16 : VLD1D<"vld1.16", v4i16, int_arm_neon_vldi>;
def VLD1d32 : VLD1D<"vld1.32", v2i32, int_arm_neon_vldi>;
def VLD1df  : VLD1D<"vld1.32", v2f32, int_arm_neon_vldf>;
def VLD1d64 : VLD1D<"vld1.64", v1i64, int_arm_neon_vldi>;

def VLD1q8  : VLD1Q<"vld1.8",  v16i8, int_arm_neon_vldi>;
def VLD1q16 : VLD1Q<"vld1.16", v8i16, int_arm_neon_vldi>;
def VLD1q32 : VLD1Q<"vld1.32", v4i32, int_arm_neon_vldi>;
def VLD1qf  : VLD1Q<"vld1.32", v4f32, int_arm_neon_vldf>;
def VLD1q64 : VLD1Q<"vld1.64", v2i64, int_arm_neon_vldi>;
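// Illustrative usage (not from the original source): after instruction
// selection, a dag such as (v2i32 (int_arm_neon_vldi addrmode6:$addr, 1)) is
// matched by VLD1d32 and prints along the lines of "vld1.32 {d0}, [r0]"; the
// VLD1Q variants print the destination Q register as a D-register pair, e.g.
// "vld1.32 {d0, d1}, [r0]".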


//===----------------------------------------------------------------------===//
// NEON pattern fragments
//===----------------------------------------------------------------------===//

// Extract D sub-registers of Q registers.
// (arm_dsubreg_0 is 5; arm_dsubreg_1 is 6)
def SubReg_i8_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, MVT::i32);
}]>;
def SubReg_i16_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, MVT::i32);
}]>;
def SubReg_i32_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, MVT::i32);
}]>;
def SubReg_f64_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue(), MVT::i32);
}]>;

// Translate lane numbers from Q registers to D subregs.
def SubReg_i8_lane : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
}]>;
def SubReg_i16_lane : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
}]>;
def SubReg_i32_lane : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
}]>;
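// Worked example: extracting lane 5 of a v8i16 Q register uses
// SubReg_i16_reg(5) = 5 + 5/4 = 6 (arm_dsubreg_1, the high D half) together
// with SubReg_i16_lane(5) = 5 & 3 = 1, i.e. lane 1 of the second D
// sub-register.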

//===----------------------------------------------------------------------===//
// Instruction Classes
//===----------------------------------------------------------------------===//

// Basic 2-register operations, both double- and quad-register.
class N2VD<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
           bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
           ValueType ResTy, ValueType OpTy, SDNode OpNode>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src))))]>;
class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
           bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
           ValueType ResTy, ValueType OpTy, SDNode OpNode>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src))))]>;

// Basic 2-register intrinsics, both double- and quad-register.
class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
              ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
              ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;

// Narrow 2-register intrinsics.
class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
              string OpcodeStr, ValueType TyD, ValueType TyQ, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$dst),
        (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src))))]>;

// Long 2-register intrinsics. (This is currently only used for VMOVL and is
// derived from N2VImm instead of N2V because of the way the size is encoded.)
class N2VLInt<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op6, bit op4, string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4, (outs QPR:$dst),
           (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
           [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src))))]>;

// Basic 3-register operations, both double- and quad-register.
class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
           string OpcodeStr, ValueType ResTy, ValueType OpTy,
           SDNode OpNode, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
           string OpcodeStr, ValueType ResTy, ValueType OpTy,
           SDNode OpNode, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
  let isCommutable = Commutable;
}

// Basic 3-register intrinsics, both double- and quad-register.
class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
  let isCommutable = Commutable;
}

// Multiply-Add/Sub operations, both double- and quad-register.
class N3VDMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
                string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode OpNode>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set DPR:$dst, (Ty (OpNode DPR:$src1,
                             (Ty (MulOp DPR:$src2, DPR:$src3)))))]>;
class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
                string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode OpNode>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst, (Ty (OpNode QPR:$src1,
                             (Ty (MulOp QPR:$src2, QPR:$src3)))))]>;

// Neon 3-argument intrinsics, both double- and quad-register.
// The destination register is also used as the first source operand register.
class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType ResTy, ValueType OpTy,
               Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1),
                                      (OpTy DPR:$src2), (OpTy DPR:$src3))))]>;
class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType ResTy, ValueType OpTy,
               Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1),
                                      (OpTy QPR:$src2), (OpTy QPR:$src3))))]>;

// Neon Long 3-argument intrinsic. The destination register is
// a quad-register and is also used as the first source operand register.
class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType TyQ, ValueType TyD, Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins QPR:$src1, DPR:$src2, DPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst,
          (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2), (TyD DPR:$src3))))]>;

// Narrowing 3-register intrinsics.
class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyD, ValueType TyQ,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins QPR:$src1, QPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src1), (TyQ QPR:$src2))))]> {
  let isCommutable = Commutable;
}

// Long 3-register intrinsics.
class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins DPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src1), (TyD DPR:$src2))))]> {
  let isCommutable = Commutable;
}

// Wide 3-register intrinsics.
class N3VWInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins QPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2))))]> {
  let isCommutable = Commutable;
}

// Pairwise long 2-register intrinsics, both double- and quad-register.
class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;

// Pairwise long 2-register accumulate intrinsics,
// both double- and quad-register.
// The destination register is also used as the first source operand register.
class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src2"), "$src1 = $dst",
        [(set DPR:$dst, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$src2))))]>;
class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src2"), "$src1 = $dst",
        [(set QPR:$dst, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$src2))))]>;

// Shift by immediate,
// both double- and quad-register.
class N2VDSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op4, string OpcodeStr, ValueType Ty, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (Ty (OpNode (Ty DPR:$src), (i32 imm:$SIMM))))]>;
class N2VQSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op4, string OpcodeStr, ValueType Ty, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (Ty (OpNode (Ty QPR:$src), (i32 imm:$SIMM))))]>;

// Long shift by immediate.
class N2VLSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op6, bit op4, string OpcodeStr, ValueType ResTy,
             ValueType OpTy, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4,
           (outs QPR:$dst), (ins DPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (ResTy (OpNode (OpTy DPR:$src),
                                          (i32 imm:$SIMM))))]>;

// Narrow shift by immediate.
class N2VNSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op6, bit op4, string OpcodeStr, ValueType ResTy,
             ValueType OpTy, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4,
           (outs DPR:$dst), (ins QPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (ResTy (OpNode (OpTy QPR:$src),
                                          (i32 imm:$SIMM))))]>;

// Shift right by immediate and accumulate,
// both double- and quad-register.
class N2VDShAdd<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set DPR:$dst, (Ty (add DPR:$src1,
                                (Ty (ShOp DPR:$src2, (i32 imm:$SIMM))))))]>;
class N2VQShAdd<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set QPR:$dst, (Ty (add QPR:$src1,
                                (Ty (ShOp QPR:$src2, (i32 imm:$SIMM))))))]>;

// Shift by immediate and insert,
// both double- and quad-register.
class N2VDShIns<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set DPR:$dst, (Ty (ShOp DPR:$src1, DPR:$src2, (i32 imm:$SIMM))))]>;
class N2VQShIns<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set QPR:$dst, (Ty (ShOp QPR:$src1, QPR:$src2, (i32 imm:$SIMM))))]>;

// Convert, with fractional bits immediate,
// both double- and quad-register.
class N2VCvtD<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op4, string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src), (i32 imm:$SIMM))))]>;
class N2VCvtQ<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op4, string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src), (i32 imm:$SIMM))))]>;

//===----------------------------------------------------------------------===//
// Multiclasses
//===----------------------------------------------------------------------===//

// Neon 3-register vector operations.

// First with only element sizes of 8, 16 and 32 bits:
multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                   string OpcodeStr, SDNode OpNode, bit Commutable = 0> {
  // 64-bit vector types.
  def v8i8  : N3VD<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                   v8i8, v8i8, OpNode, Commutable>;
  def v4i16 : N3VD<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr, "16"),
                   v4i16, v4i16, OpNode, Commutable>;
  def v2i32 : N3VD<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr, "32"),
                   v2i32, v2i32, OpNode, Commutable>;

  // 128-bit vector types.
  def v16i8 : N3VQ<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                   v16i8, v16i8, OpNode, Commutable>;
  def v8i16 : N3VQ<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr, "16"),
                   v8i16, v8i16, OpNode, Commutable>;
  def v4i32 : N3VQ<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr, "32"),
                   v4i32, v4i32, OpNode, Commutable>;
}

// ....then also with element size 64 bits:
multiclass N3V_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                    string OpcodeStr, SDNode OpNode, bit Commutable = 0>
  : N3V_QHS<op24, op23, op11_8, op4, OpcodeStr, OpNode, Commutable> {
  def v1i64 : N3VD<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr, "64"),
                   v1i64, v1i64, OpNode, Commutable>;
  def v2i64 : N3VQ<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr, "64"),
                   v2i64, v2i64, OpNode, Commutable>;
}
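// Note on naming: instantiating one of these multiclasses with "defm", e.g.
//   defm VADD : N3V_QHSD<0, 0, 0b1000, 0, "vadd.i", add, 1>;
// (see the instruction definitions below) creates one record per element
// size, named VADDv8i8, VADDv4i16, ..., VADDv2i64.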


// Neon Narrowing 2-register vector intrinsics,
// source operand element sizes of 16, 32 and 64 bits:
multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                       bits<5> op11_7, bit op6, bit op4, string OpcodeStr,
                       Intrinsic IntOp> {
  def v8i8  : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "16"), v8i8, v8i16, IntOp>;
  def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "32"), v4i16, v4i32, IntOp>;
  def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "64"), v2i32, v2i64, IntOp>;
}


// Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
// source operand element sizes of 8, 16 and 32 bits:
multiclass N2VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
                       bit op4, string OpcodeStr, Intrinsic IntOp> {
  def v8i16 : N2VLInt<op24, op23, 0b001000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "8"), v8i16, v8i8, IntOp>;
  def v4i32 : N2VLInt<op24, op23, 0b010000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "16"), v4i32, v4i16, IntOp>;
  def v2i64 : N2VLInt<op24, op23, 0b100000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "32"), v2i64, v2i32, IntOp>;
}


// Neon 3-register vector intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                     string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  // 64-bit vector types.
  def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i16, v4i16, IntOp, Commutable>;
  def v2i32 : N3VDInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i32, v2i32, IntOp, Commutable>;

  // 128-bit vector types.
  def v8i16 : N3VQInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v8i16, v8i16, IntOp, Commutable>;
  def v4i32 : N3VQInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v4i32, v4i32, IntOp, Commutable>;
}

// ....then also with element size of 8 bits:
multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                      string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VInt_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v8i8  : N3VDInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i8, v8i8, IntOp, Commutable>;
  def v16i8 : N3VQInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v16i8, v16i8, IntOp, Commutable>;
}

// ....then also with element size of 64 bits:
multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VInt_QHS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v1i64, v1i64, IntOp, Commutable>;
  def v2i64 : N3VQInt<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v2i64, v2i64, IntOp, Commutable>;
}


// Neon Narrowing 3-register vector intrinsics,
// source operand element sizes of 16, 32 and 64 bits:
multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v8i8  : N3VNInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v8i8, v8i16, IntOp, Commutable>;
  def v4i16 : N3VNInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v4i16, v4i32, IntOp, Commutable>;
  def v2i32 : N3VNInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v2i32, v2i64, IntOp, Commutable>;
}


// Neon Long 3-register vector intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                      string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i32, v4i16, IntOp, Commutable>;
  def v2i64 : N3VLInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i64, v2i32, IntOp, Commutable>;
}

// ....then also with element size of 8 bits:
multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VLInt_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v8i16 : N3VLInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i16, v8i8, IntOp, Commutable>;
}


// Neon Wide 3-register vector intrinsics,
// source operand element sizes of 8, 16 and 32 bits:
multiclass N3VWInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v8i16 : N3VWInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i16, v8i8, IntOp, Commutable>;
  def v4i32 : N3VWInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i32, v4i16, IntOp, Commutable>;
  def v2i64 : N3VWInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i64, v2i32, IntOp, Commutable>;
}


// Neon Multiply-Op vector operations,
// element sizes of 8, 16 and 32 bits:
multiclass N3VMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                        string OpcodeStr, SDNode OpNode> {
  // 64-bit vector types.
  def v8i8  : N3VDMulOp<op24, op23, 0b00, op11_8, op4,
                        !strconcat(OpcodeStr, "8"), v8i8, mul, OpNode>;
  def v4i16 : N3VDMulOp<op24, op23, 0b01, op11_8, op4,
                        !strconcat(OpcodeStr, "16"), v4i16, mul, OpNode>;
  def v2i32 : N3VDMulOp<op24, op23, 0b10, op11_8, op4,
                        !strconcat(OpcodeStr, "32"), v2i32, mul, OpNode>;

  // 128-bit vector types.
  def v16i8 : N3VQMulOp<op24, op23, 0b00, op11_8, op4,
                        !strconcat(OpcodeStr, "8"), v16i8, mul, OpNode>;
  def v8i16 : N3VQMulOp<op24, op23, 0b01, op11_8, op4,
                        !strconcat(OpcodeStr, "16"), v8i16, mul, OpNode>;
  def v4i32 : N3VQMulOp<op24, op23, 0b10, op11_8, op4,
                        !strconcat(OpcodeStr, "32"), v4i32, mul, OpNode>;
}


// Neon 3-argument intrinsics,
// element sizes of 8, 16 and 32 bits:
multiclass N3VInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N3VDInt3<op24, op23, 0b00, op11_8, op4,
                       !strconcat(OpcodeStr, "8"), v8i8, v8i8, IntOp>;
  def v4i16 : N3VDInt3<op24, op23, 0b01, op11_8, op4,
                       !strconcat(OpcodeStr, "16"), v4i16, v4i16, IntOp>;
  def v2i32 : N3VDInt3<op24, op23, 0b10, op11_8, op4,
                       !strconcat(OpcodeStr, "32"), v2i32, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N3VQInt3<op24, op23, 0b00, op11_8, op4,
                       !strconcat(OpcodeStr, "8"), v16i8, v16i8, IntOp>;
  def v8i16 : N3VQInt3<op24, op23, 0b01, op11_8, op4,
                       !strconcat(OpcodeStr, "16"), v8i16, v8i16, IntOp>;
  def v4i32 : N3VQInt3<op24, op23, 0b10, op11_8, op4,
                       !strconcat(OpcodeStr, "32"), v4i32, v4i32, IntOp>;
}


// Neon Long 3-argument intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp> {
  def v4i32 : N3VLInt3<op24, op23, 0b01, op11_8, op4,
                       !strconcat(OpcodeStr, "16"), v4i32, v4i16, IntOp>;
  def v2i64 : N3VLInt3<op24, op23, 0b10, op11_8, op4,
                       !strconcat(OpcodeStr, "32"), v2i64, v2i32, IntOp>;
}

// ....then also with element size of 8 bits:
multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                        string OpcodeStr, Intrinsic IntOp>
  : N3VLInt3_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp> {
  def v8i16 : N3VLInt3<op24, op23, 0b01, op11_8, op4,
                       !strconcat(OpcodeStr, "8"), v8i16, v8i8, IntOp>;
}


// Neon 2-register vector intrinsics,
// element sizes of 8, 16 and 32 bits:
multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                      bits<5> op11_7, bit op4, string OpcodeStr,
                      Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "8"), v8i8, v8i8, IntOp>;
  def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "16"), v4i16, v4i16, IntOp>;
  def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "32"), v2i32, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "8"), v16i8, v16i8, IntOp>;
  def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "16"), v8i16, v8i16, IntOp>;
  def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                      !strconcat(OpcodeStr, "32"), v4i32, v4i32, IntOp>;
}


// Neon Pairwise long 2-register intrinsics,
// element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                        bits<5> op11_7, bit op4,
                        string OpcodeStr, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N2VDPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "8"), v4i16, v8i8, IntOp>;
  def v4i16 : N2VDPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "16"), v2i32, v4i16, IntOp>;
  def v2i32 : N2VDPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "32"), v1i64, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N2VQPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "8"), v8i16, v16i8, IntOp>;
  def v8i16 : N2VQPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "16"), v4i32, v8i16, IntOp>;
  def v4i32 : N2VQPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                        !strconcat(OpcodeStr, "32"), v2i64, v4i32, IntOp>;
}


// Neon Pairwise long 2-register accumulate intrinsics,
// element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                         bits<5> op11_7, bit op4,
                         string OpcodeStr, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N2VDPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "8"), v4i16, v8i8, IntOp>;
  def v4i16 : N2VDPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "16"), v2i32, v4i16, IntOp>;
  def v2i32 : N2VDPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "32"), v1i64, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N2VQPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "8"), v8i16, v16i8, IntOp>;
  def v8i16 : N2VQPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "16"), v4i32, v8i16, IntOp>;
  def v4i32 : N2VQPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                         !strconcat(OpcodeStr, "32"), v2i64, v4i32, IntOp>;
}


// Neon 2-register vector shift by immediate,
// element sizes of 8, 16, 32 and 64 bits:
multiclass N2VSh_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                      string OpcodeStr, SDNode OpNode> {
  // 64-bit vector types.
  def v8i8  : N2VDSh<op24, op23, 0b001000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "8"), v8i8, OpNode>;
  def v4i16 : N2VDSh<op24, op23, 0b010000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "16"), v4i16, OpNode>;
  def v2i32 : N2VDSh<op24, op23, 0b100000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "32"), v2i32, OpNode>;
  def v1i64 : N2VDSh<op24, op23, 0b000000, op11_8, 1, op4,
                     !strconcat(OpcodeStr, "64"), v1i64, OpNode>;

  // 128-bit vector types.
  def v16i8 : N2VQSh<op24, op23, 0b001000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "8"), v16i8, OpNode>;
  def v8i16 : N2VQSh<op24, op23, 0b010000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "16"), v8i16, OpNode>;
  def v4i32 : N2VQSh<op24, op23, 0b100000, op11_8, 0, op4,
                     !strconcat(OpcodeStr, "32"), v4i32, OpNode>;
  def v2i64 : N2VQSh<op24, op23, 0b000000, op11_8, 1, op4,
                     !strconcat(OpcodeStr, "64"), v2i64, OpNode>;
}
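// For example, "defm VSHRs : N2VSh_QHSD<0, 1, 0b0000, 1, "vshr.s", NEONvshrs>"
// (defined below) expands to VSHRsv8i8 through VSHRsv2i64, each matching a
// (NEONvshrs vector, (i32 imm)) dag and printing as "vshr.sN $dst, $src, $SIMM".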


// Neon Shift-Accumulate vector operations,
// element sizes of 8, 16, 32 and 64 bits:
multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                         string OpcodeStr, SDNode ShOp> {
  // 64-bit vector types.
  def v8i8  : N2VDShAdd<op24, op23, 0b001000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "8"), v8i8, ShOp>;
  def v4i16 : N2VDShAdd<op24, op23, 0b010000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "16"), v4i16, ShOp>;
  def v2i32 : N2VDShAdd<op24, op23, 0b100000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "32"), v2i32, ShOp>;
  def v1i64 : N2VDShAdd<op24, op23, 0b000000, op11_8, 1, op4,
                        !strconcat(OpcodeStr, "64"), v1i64, ShOp>;

  // 128-bit vector types.
  def v16i8 : N2VQShAdd<op24, op23, 0b001000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "8"), v16i8, ShOp>;
  def v8i16 : N2VQShAdd<op24, op23, 0b010000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "16"), v8i16, ShOp>;
  def v4i32 : N2VQShAdd<op24, op23, 0b100000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "32"), v4i32, ShOp>;
  def v2i64 : N2VQShAdd<op24, op23, 0b000000, op11_8, 1, op4,
                        !strconcat(OpcodeStr, "64"), v2i64, ShOp>;
}


// Neon Shift-Insert vector operations,
// element sizes of 8, 16, 32 and 64 bits:
multiclass N2VShIns_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                         string OpcodeStr, SDNode ShOp> {
  // 64-bit vector types.
  def v8i8  : N2VDShIns<op24, op23, 0b001000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "8"), v8i8, ShOp>;
  def v4i16 : N2VDShIns<op24, op23, 0b010000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "16"), v4i16, ShOp>;
  def v2i32 : N2VDShIns<op24, op23, 0b100000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "32"), v2i32, ShOp>;
  def v1i64 : N2VDShIns<op24, op23, 0b000000, op11_8, 1, op4,
                        !strconcat(OpcodeStr, "64"), v1i64, ShOp>;

  // 128-bit vector types.
  def v16i8 : N2VQShIns<op24, op23, 0b001000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "8"), v16i8, ShOp>;
  def v8i16 : N2VQShIns<op24, op23, 0b010000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "16"), v8i16, ShOp>;
  def v4i32 : N2VQShIns<op24, op23, 0b100000, op11_8, 0, op4,
                        !strconcat(OpcodeStr, "32"), v4i32, ShOp>;
  def v2i64 : N2VQShIns<op24, op23, 0b000000, op11_8, 1, op4,
                        !strconcat(OpcodeStr, "64"), v2i64, ShOp>;
}

//===----------------------------------------------------------------------===//
// Instruction Definitions.
//===----------------------------------------------------------------------===//

// Vector Add Operations.

// VADD : Vector Add (integer and floating-point)
defm VADD : N3V_QHSD<0, 0, 0b1000, 0, "vadd.i", add, 1>;
def VADDfd : N3VD<0, 0, 0b00, 0b1101, 0, "vadd.f32", v2f32, v2f32, fadd, 1>;
def VADDfq : N3VQ<0, 0, 0b00, 0b1101, 0, "vadd.f32", v4f32, v4f32, fadd, 1>;
// VADDL : Vector Add Long (Q = D + D)
defm VADDLs : N3VLInt_QHS<0,1,0b0000,0, "vaddl.s", int_arm_neon_vaddls, 1>;
defm VADDLu : N3VLInt_QHS<1,1,0b0000,0, "vaddl.u", int_arm_neon_vaddlu, 1>;
// VADDW : Vector Add Wide (Q = Q + D)
defm VADDWs : N3VWInt_QHS<0,1,0b0001,0, "vaddw.s", int_arm_neon_vaddws, 0>;
defm VADDWu : N3VWInt_QHS<1,1,0b0001,0, "vaddw.u", int_arm_neon_vaddwu, 0>;
// VHADD : Vector Halving Add
defm VHADDs : N3VInt_QHS<0,0,0b0000,0, "vhadd.s", int_arm_neon_vhadds, 1>;
defm VHADDu : N3VInt_QHS<1,0,0b0000,0, "vhadd.u", int_arm_neon_vhaddu, 1>;
// VRHADD : Vector Rounding Halving Add
defm VRHADDs : N3VInt_QHS<0,0,0b0001,0, "vrhadd.s", int_arm_neon_vrhadds, 1>;
defm VRHADDu : N3VInt_QHS<1,0,0b0001,0, "vrhadd.u", int_arm_neon_vrhaddu, 1>;
// VQADD : Vector Saturating Add
defm VQADDs : N3VInt_QHSD<0,0,0b0000,1, "vqadd.s", int_arm_neon_vqadds, 1>;
defm VQADDu : N3VInt_QHSD<1,0,0b0000,1, "vqadd.u", int_arm_neon_vqaddu, 1>;
// VADDHN : Vector Add and Narrow Returning High Half (D = Q + Q)
defm VADDHN : N3VNInt_HSD<0,1,0b0100,0, "vaddhn.i", int_arm_neon_vaddhn, 1>;
// VRADDHN : Vector Rounding Add and Narrow Returning High Half (D = Q + Q)
defm VRADDHN : N3VNInt_HSD<1,1,0b0100,0, "vraddhn.i", int_arm_neon_vraddhn, 1>;
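// For example, VADDLsv4i32 (vaddl.s16) adds two v4i16 D registers into a
// v4i32 Q register, while VADDHNv4i16 (vaddhn.i32) adds two v4i32 Q registers
// and returns the high half of each 32-bit sum as a v4i16 D register.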

// Vector Multiply Operations.

// VMUL : Vector Multiply (integer, polynomial and floating-point)
defm VMUL : N3V_QHS<0, 0, 0b1001, 1, "vmul.i", mul, 1>;
def VMULpd : N3VDInt<1, 0, 0b00, 0b1001, 1, "vmul.p8", v8i8, v8i8,
                     int_arm_neon_vmulp, 1>;
def VMULpq : N3VQInt<1, 0, 0b00, 0b1001, 1, "vmul.p8", v16i8, v16i8,
                     int_arm_neon_vmulp, 1>;
def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, "vmul.f32", v2f32, v2f32, fmul, 1>;
def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, "vmul.f32", v4f32, v4f32, fmul, 1>;
// VQDMULH : Vector Saturating Doubling Multiply Returning High Half
defm VQDMULH : N3VInt_HS<0,0,0b1011,0, "vqdmulh.s", int_arm_neon_vqdmulh, 1>;
// VQRDMULH : Vector Rounding Saturating Doubling Multiply Returning High Half
defm VQRDMULH : N3VInt_HS<1,0,0b1011,0, "vqrdmulh.s", int_arm_neon_vqrdmulh, 1>;
// VMULL : Vector Multiply Long (integer and polynomial) (Q = D * D)
defm VMULLs : N3VLInt_QHS<0,1,0b1100,0, "vmull.s", int_arm_neon_vmulls, 1>;
defm VMULLu : N3VLInt_QHS<1,1,0b1100,0, "vmull.u", int_arm_neon_vmullu, 1>;
def VMULLp : N3VLInt<0, 1, 0b00, 0b1110, 0, "vmull.p8", v8i16, v8i8,
                     int_arm_neon_vmullp, 1>;
// VQDMULL : Vector Saturating Doubling Multiply Long (Q = D * D)
defm VQDMULL : N3VLInt_HS<0,1,0b1101,0, "vqdmull.s", int_arm_neon_vqdmull, 1>;

// Vector Multiply-Accumulate and Multiply-Subtract Operations.

// VMLA : Vector Multiply Accumulate (integer and floating-point)
defm VMLA : N3VMulOp_QHS<0, 0, 0b1001, 0, "vmla.i", add>;
def VMLAfd : N3VDMulOp<0, 0, 0b00, 0b1101, 1, "vmla.f32", v2f32, fmul, fadd>;
def VMLAfq : N3VQMulOp<0, 0, 0b00, 0b1101, 1, "vmla.f32", v4f32, fmul, fadd>;
// VMLAL : Vector Multiply Accumulate Long (Q += D * D)
defm VMLALs : N3VLInt3_QHS<0,1,0b1000,0, "vmlal.s", int_arm_neon_vmlals>;
defm VMLALu : N3VLInt3_QHS<1,1,0b1000,0, "vmlal.u", int_arm_neon_vmlalu>;
// VQDMLAL : Vector Saturating Doubling Multiply Accumulate Long (Q += D * D)
defm VQDMLAL : N3VLInt3_HS<0, 1, 0b1001, 0, "vqdmlal.s", int_arm_neon_vqdmlal>;
// VMLS : Vector Multiply Subtract (integer and floating-point)
defm VMLS : N3VMulOp_QHS<0, 0, 0b1001, 0, "vmls.i", sub>;
def VMLSfd : N3VDMulOp<0, 0, 0b10, 0b1101, 1, "vmls.f32", v2f32, fmul, fsub>;
def VMLSfq : N3VQMulOp<0, 0, 0b10, 0b1101, 1, "vmls.f32", v4f32, fmul, fsub>;
// VMLSL : Vector Multiply Subtract Long (Q -= D * D)
defm VMLSLs : N3VLInt3_QHS<0,1,0b1010,0, "vmlsl.s", int_arm_neon_vmlsls>;
defm VMLSLu : N3VLInt3_QHS<1,1,0b1010,0, "vmlsl.u", int_arm_neon_vmlslu>;
// VQDMLSL : Vector Saturating Doubling Multiply Subtract Long (Q -= D * D)
defm VQDMLSL : N3VLInt3_HS<0, 1, 0b1011, 0, "vqdmlsl.s", int_arm_neon_vqdmlsl>;

// Vector Subtract Operations.

// VSUB : Vector Subtract (integer and floating-point)
defm VSUB : N3V_QHSD<1, 0, 0b1000, 0, "vsub.i", sub, 0>;
def VSUBfd : N3VD<0, 0, 0b10, 0b1101, 0, "vsub.f32", v2f32, v2f32, fsub, 0>;
def VSUBfq : N3VQ<0, 0, 0b10, 0b1101, 0, "vsub.f32", v4f32, v4f32, fsub, 0>;
// VSUBL : Vector Subtract Long (Q = D - D)
defm VSUBLs : N3VLInt_QHS<0,1,0b0010,0, "vsubl.s", int_arm_neon_vsubls, 1>;
defm VSUBLu : N3VLInt_QHS<1,1,0b0010,0, "vsubl.u", int_arm_neon_vsublu, 1>;
// VSUBW : Vector Subtract Wide (Q = Q - D)
defm VSUBWs : N3VWInt_QHS<0,1,0b0011,0, "vsubw.s", int_arm_neon_vsubws, 0>;
defm VSUBWu : N3VWInt_QHS<1,1,0b0011,0, "vsubw.u", int_arm_neon_vsubwu, 0>;
// VHSUB : Vector Halving Subtract
defm VHSUBs : N3VInt_QHS<0, 0, 0b0010, 0, "vhsub.s", int_arm_neon_vhsubs, 0>;
defm VHSUBu : N3VInt_QHS<1, 0, 0b0010, 0, "vhsub.u", int_arm_neon_vhsubu, 0>;
// VQSUB : Vector Saturating Subtract
defm VQSUBs : N3VInt_QHSD<0, 0, 0b0010, 1, "vqsub.s", int_arm_neon_vqsubs, 0>;
defm VQSUBu : N3VInt_QHSD<1, 0, 0b0010, 1, "vqsub.u", int_arm_neon_vqsubu, 0>;
// VSUBHN : Vector Subtract and Narrow Returning High Half (D = Q - Q)
defm VSUBHN : N3VNInt_HSD<0,1,0b0110,0, "vsubhn.i", int_arm_neon_vsubhn, 0>;
// VRSUBHN : Vector Rounding Subtract and Narrow Returning High Half (D=Q-Q)
defm VRSUBHN : N3VNInt_HSD<1,1,0b0110,0, "vrsubhn.i", int_arm_neon_vrsubhn, 0>;

// Vector Comparisons.

// VCEQ : Vector Compare Equal
defm VCEQ : N3V_QHS<1, 0, 0b1000, 1, "vceq.i", NEONvceq, 1>;
def VCEQfd : N3VD<0,0,0b00,0b1110,0, "vceq.f32", v2i32, v2f32, NEONvceq, 1>;
def VCEQfq : N3VQ<0,0,0b00,0b1110,0, "vceq.f32", v4i32, v4f32, NEONvceq, 1>;
// VCGE : Vector Compare Greater Than or Equal
defm VCGEs : N3V_QHS<0, 0, 0b0011, 1, "vcge.s", NEONvcge, 0>;
defm VCGEu : N3V_QHS<1, 0, 0b0011, 1, "vcge.u", NEONvcgeu, 0>;
def VCGEfd : N3VD<1,0,0b00,0b1110,0, "vcge.f32", v2i32, v2f32, NEONvcge, 0>;
def VCGEfq : N3VQ<1,0,0b00,0b1110,0, "vcge.f32", v4i32, v4f32, NEONvcge, 0>;
// VCGT : Vector Compare Greater Than
defm VCGTs : N3V_QHS<0, 0, 0b0011, 0, "vcgt.s", NEONvcgt, 0>;
defm VCGTu : N3V_QHS<1, 0, 0b0011, 0, "vcgt.u", NEONvcgtu, 0>;
def VCGTfd : N3VD<1,0,0b10,0b1110,0, "vcgt.f32", v2i32, v2f32, NEONvcgt, 0>;
def VCGTfq : N3VQ<1,0,0b10,0b1110,0, "vcgt.f32", v4i32, v4f32, NEONvcgt, 0>;
// VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
def VACGEd : N3VDInt<1, 0, 0b00, 0b1110, 1, "vacge.f32", v2i32, v2f32,
                     int_arm_neon_vacged, 0>;
def VACGEq : N3VQInt<1, 0, 0b00, 0b1110, 1, "vacge.f32", v4i32, v4f32,
                     int_arm_neon_vacgeq, 0>;
// VACGT : Vector Absolute Compare Greater Than (aka VCAGT)
def VACGTd : N3VDInt<1, 0, 0b10, 0b1110, 1, "vacgt.f32", v2i32, v2f32,
                     int_arm_neon_vacgtd, 0>;
def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, "vacgt.f32", v4i32, v4f32,
                     int_arm_neon_vacgtq, 0>;
// VTST : Vector Test Bits
defm VTST : N3V_QHS<0, 0, 0b1000, 1, "vtst.i", NEONvtst, 1>;

// Vector Bitwise Operations.

// VAND : Vector Bitwise AND
def VANDd : N3VD<0, 0, 0b00, 0b0001, 1, "vand", v2i32, v2i32, and, 1>;
def VANDq : N3VQ<0, 0, 0b00, 0b0001, 1, "vand", v4i32, v4i32, and, 1>;

// VEOR : Vector Bitwise Exclusive OR
def VEORd : N3VD<1, 0, 0b00, 0b0001, 1, "veor", v2i32, v2i32, xor, 1>;
def VEORq : N3VQ<1, 0, 0b00, 0b0001, 1, "veor", v4i32, v4i32, xor, 1>;

// VORR : Vector Bitwise OR
def VORRd : N3VD<0, 0, 0b10, 0b0001, 1, "vorr", v2i32, v2i32, or, 1>;
def VORRq : N3VQ<0, 0, 0b10, 0b0001, 1, "vorr", v4i32, v4i32, or, 1>;

// VBIC : Vector Bitwise Bit Clear (AND NOT)
def VBICd : N3V<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
                (ins DPR:$src1, DPR:$src2), "vbic\t$dst, $src1, $src2", "",
                [(set DPR:$dst, (v2i32 (and DPR:$src1, (vnot DPR:$src2))))]>;
def VBICq : N3V<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
                (ins QPR:$src1, QPR:$src2), "vbic\t$dst, $src1, $src2", "",
                [(set QPR:$dst, (v4i32 (and QPR:$src1, (vnot QPR:$src2))))]>;

// VORN : Vector Bitwise OR NOT
def VORNd : N3V<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$dst),
                (ins DPR:$src1, DPR:$src2), "vorn\t$dst, $src1, $src2", "",
                [(set DPR:$dst, (v2i32 (or DPR:$src1, (vnot DPR:$src2))))]>;
def VORNq : N3V<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$dst),
                (ins QPR:$src1, QPR:$src2), "vorn\t$dst, $src1, $src2", "",
                [(set QPR:$dst, (v4i32 (or QPR:$src1, (vnot QPR:$src2))))]>;

// VMVN : Vector Bitwise NOT
def VMVNd : N2V<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
                (outs DPR:$dst), (ins DPR:$src), "vmvn\t$dst, $src", "",
                [(set DPR:$dst, (v2i32 (vnot DPR:$src)))]>;
def VMVNq : N2V<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
                (outs QPR:$dst), (ins QPR:$src), "vmvn\t$dst, $src", "",
                [(set QPR:$dst, (v4i32 (vnot QPR:$src)))]>;
def : Pat<(v2i32 (vnot_conv DPR:$src)), (VMVNd DPR:$src)>;
def : Pat<(v4i32 (vnot_conv QPR:$src)), (VMVNq QPR:$src)>;

// VBSL : Vector Bitwise Select
def VBSLd : N3V<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
                (ins DPR:$src1, DPR:$src2, DPR:$src3),
                "vbsl\t$dst, $src2, $src3", "$src1 = $dst",
                [(set DPR:$dst,
                  (v2i32 (or (and DPR:$src2, DPR:$src1),
                             (and DPR:$src3, (vnot DPR:$src1)))))]>;
def VBSLq : N3V<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
                (ins QPR:$src1, QPR:$src2, QPR:$src3),
                "vbsl\t$dst, $src2, $src3", "$src1 = $dst",
                [(set QPR:$dst,
                  (v4i32 (or (and QPR:$src2, QPR:$src1),
                             (and QPR:$src3, (vnot QPR:$src1)))))]>;
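// In other words, VBSL computes (src2 & src1) | (src3 & ~src1): each result
// bit comes from $src2 where the corresponding bit of the mask $src1 (tied to
// $dst) is set, and from $src3 where it is clear.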
987
988// VBIF : Vector Bitwise Insert if False
989// like VBSL but with: "vbif\t$dst, $src3, $src1", "$src2 = $dst",
990// VBIT : Vector Bitwise Insert if True
991// like VBSL but with: "vbit\t$dst, $src2, $src1", "$src3 = $dst",
992// These are not yet implemented. The TwoAddress pass will not go looking
993// for equivalent operations with different register constraints; it just
994// inserts copies.
995
996// Vector Absolute Differences.
997
998// VABD : Vector Absolute Difference
999defm VABDs : N3VInt_QHS<0, 0, 0b0111, 0, "vabd.s", int_arm_neon_vabds, 0>;
1000defm VABDu : N3VInt_QHS<1, 0, 0b0111, 0, "vabd.u", int_arm_neon_vabdu, 0>;
1001def VABDfd : N3VDInt<1, 0, 0b10, 0b1101, 0, "vabd.f32", v2f32, v2f32,
1002 int_arm_neon_vabdf, 0>;
1003def VABDfq : N3VQInt<1, 0, 0b10, 0b1101, 0, "vabd.f32", v4f32, v4f32,
1004 int_arm_neon_vabdf, 0>;
1005
1006// VABDL : Vector Absolute Difference Long (Q = | D - D |)
1007defm VABDLs : N3VLInt_QHS<0,1,0b0111,0, "vabdl.s", int_arm_neon_vabdls, 0>;
1008defm VABDLu : N3VLInt_QHS<1,1,0b0111,0, "vabdl.u", int_arm_neon_vabdlu, 0>;
1009
1010// VABA : Vector Absolute Difference and Accumulate
1011defm VABAs : N3VInt3_QHS<0,1,0b0101,0, "vaba.s", int_arm_neon_vabas>;
1012defm VABAu : N3VInt3_QHS<1,1,0b0101,0, "vaba.u", int_arm_neon_vabau>;
1013
1014// VABAL : Vector Absolute Difference and Accumulate Long (Q += | D - D |)
1015defm VABALs : N3VLInt3_QHS<0,1,0b0101,0, "vabal.s", int_arm_neon_vabals>;
1016defm VABALu : N3VLInt3_QHS<1,1,0b0101,0, "vabal.u", int_arm_neon_vabalu>;
1017
1018// Vector Maximum and Minimum.
1019
1020// VMAX : Vector Maximum
1021defm VMAXs : N3VInt_QHS<0, 0, 0b0110, 0, "vmax.s", int_arm_neon_vmaxs, 1>;
1022defm VMAXu : N3VInt_QHS<1, 0, 0b0110, 0, "vmax.u", int_arm_neon_vmaxu, 1>;
1023def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, "vmax.f32", v2f32, v2f32,
1024 int_arm_neon_vmaxf, 1>;
1025def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, "vmax.f32", v4f32, v4f32,
1026 int_arm_neon_vmaxf, 1>;
1027
1028// VMIN : Vector Minimum
1029defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, "vmin.s", int_arm_neon_vmins, 1>;
1030defm VMINu : N3VInt_QHS<1, 0, 0b0110, 1, "vmin.u", int_arm_neon_vminu, 1>;
1031def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, "vmin.f32", v2f32, v2f32,
1032 int_arm_neon_vminf, 1>;
1033def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, "vmin.f32", v4f32, v4f32,
1034 int_arm_neon_vminf, 1>;
1035
1036// Vector Pairwise Operations.
1037
1038// VPADD : Vector Pairwise Add
1039def VPADDi8 : N3VDInt<0, 0, 0b00, 0b1011, 1, "vpadd.i8", v8i8, v8i8,
1040 int_arm_neon_vpaddi, 0>;
1041def VPADDi16 : N3VDInt<0, 0, 0b01, 0b1011, 1, "vpadd.i16", v4i16, v4i16,
1042 int_arm_neon_vpaddi, 0>;
1043def VPADDi32 : N3VDInt<0, 0, 0b10, 0b1011, 1, "vpadd.i32", v2i32, v2i32,
1044 int_arm_neon_vpaddi, 0>;
1045def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, "vpadd.f32", v2f32, v2f32,
1046 int_arm_neon_vpaddf, 0>;
1047
1048// VPADDL : Vector Pairwise Add Long
1049defm VPADDLs : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpaddl.s",
1050 int_arm_neon_vpaddls>;
1051defm VPADDLu : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpaddl.u",
1052 int_arm_neon_vpaddlu>;
1053
1054// VPADAL : Vector Pairwise Add and Accumulate Long
1055defm VPADALs : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpadal.s",
1056 int_arm_neon_vpadals>;
1057defm VPADALu : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpadal.u",
1058 int_arm_neon_vpadalu>;
1059
1060// VPMAX : Vector Pairwise Maximum
1061def VPMAXs8 : N3VDInt<0, 0, 0b00, 0b1010, 0, "vpmax.s8", v8i8, v8i8,
1062 int_arm_neon_vpmaxs, 0>;
1063def VPMAXs16 : N3VDInt<0, 0, 0b01, 0b1010, 0, "vpmax.s16", v4i16, v4i16,
1064 int_arm_neon_vpmaxs, 0>;
1065def VPMAXs32 : N3VDInt<0, 0, 0b10, 0b1010, 0, "vpmax.s32", v2i32, v2i32,
1066 int_arm_neon_vpmaxs, 0>;
1067def VPMAXu8 : N3VDInt<1, 0, 0b00, 0b1010, 0, "vpmax.u8", v8i8, v8i8,
1068 int_arm_neon_vpmaxu, 0>;
1069def VPMAXu16 : N3VDInt<1, 0, 0b01, 0b1010, 0, "vpmax.u16", v4i16, v4i16,
1070 int_arm_neon_vpmaxu, 0>;
1071def VPMAXu32 : N3VDInt<1, 0, 0b10, 0b1010, 0, "vpmax.u32", v2i32, v2i32,
1072 int_arm_neon_vpmaxu, 0>;
1073def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, "vpmax.f32", v2f32, v2f32,
1074 int_arm_neon_vpmaxf, 0>;
1075
1076// VPMIN : Vector Pairwise Minimum
1077def VPMINs8 : N3VDInt<0, 0, 0b00, 0b1010, 1, "vpmin.s8", v8i8, v8i8,
1078 int_arm_neon_vpmins, 0>;
1079def VPMINs16 : N3VDInt<0, 0, 0b01, 0b1010, 1, "vpmin.s16", v4i16, v4i16,
1080 int_arm_neon_vpmins, 0>;
1081def VPMINs32 : N3VDInt<0, 0, 0b10, 0b1010, 1, "vpmin.s32", v2i32, v2i32,
1082 int_arm_neon_vpmins, 0>;
1083def VPMINu8 : N3VDInt<1, 0, 0b00, 0b1010, 1, "vpmin.u8", v8i8, v8i8,
1084 int_arm_neon_vpminu, 0>;
1085def VPMINu16 : N3VDInt<1, 0, 0b01, 0b1010, 1, "vpmin.u16", v4i16, v4i16,
1086 int_arm_neon_vpminu, 0>;
1087def VPMINu32 : N3VDInt<1, 0, 0b10, 0b1010, 1, "vpmin.u32", v2i32, v2i32,
1088 int_arm_neon_vpminu, 0>;
1089def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, "vpmin.f32", v2f32, v2f32,
1090 int_arm_neon_vpminf, 0>;
1091
1092// Vector Reciprocal and Reciprocal Square Root Estimate and Step.
1093
1094// VRECPE : Vector Reciprocal Estimate
1095def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0, "vrecpe.u32",
1096 v2i32, v2i32, int_arm_neon_vrecpe>;
1097def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0, "vrecpe.u32",
1098 v4i32, v4i32, int_arm_neon_vrecpe>;
1099def VRECPEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0, "vrecpe.f32",
1100 v2f32, v2f32, int_arm_neon_vrecpef>;
1101def VRECPEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0, "vrecpe.f32",
1102 v4f32, v4f32, int_arm_neon_vrecpef>;
1103
1104// VRECPS : Vector Reciprocal Step
1105def VRECPSfd : N3VDInt<0, 0, 0b00, 0b1111, 1, "vrecps.f32", v2f32, v2f32,
1106 int_arm_neon_vrecps, 1>;
1107def VRECPSfq : N3VQInt<0, 0, 0b00, 0b1111, 1, "vrecps.f32", v4f32, v4f32,
1108 int_arm_neon_vrecps, 1>;
1109
1110// VRSQRTE : Vector Reciprocal Square Root Estimate
1111def VRSQRTEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0, "vrsqrte.u32",
1112 v2i32, v2i32, int_arm_neon_vrsqrte>;
1113def VRSQRTEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0, "vrsqrte.u32",
1114 v4i32, v4i32, int_arm_neon_vrsqrte>;
1115def VRSQRTEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0, "vrsqrte.f32",
1116 v2f32, v2f32, int_arm_neon_vrsqrtef>;
1117def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0, "vrsqrte.f32",
1118 v4f32, v4f32, int_arm_neon_vrsqrtef>;
1119
1120// VRSQRTS : Vector Reciprocal Square Root Step
1121def VRSQRTSfd : N3VDInt<0, 0, 0b10, 0b1111, 1, "vrsqrts.f32", v2f32, v2f32,
1122 int_arm_neon_vrsqrts, 1>;
1123def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1, "vrsqrts.f32", v4f32, v4f32,
1124 int_arm_neon_vrsqrts, 1>;
1125
1126// Vector Shifts.
1127
1128// VSHL : Vector Shift
1129defm VSHLs : N3VInt_QHSD<0, 0, 0b0100, 0, "vshl.s", int_arm_neon_vshifts, 0>;
1130defm VSHLu : N3VInt_QHSD<1, 0, 0b0100, 0, "vshl.u", int_arm_neon_vshiftu, 0>;
1131// VSHL : Vector Shift Left (Immediate)
1132defm VSHLi : N2VSh_QHSD<0, 1, 0b0111, 1, "vshl.i", NEONvshl>;
1133// VSHR : Vector Shift Right (Immediate)
1134defm VSHRs : N2VSh_QHSD<0, 1, 0b0000, 1, "vshr.s", NEONvshrs>;
1135defm VSHRu : N2VSh_QHSD<1, 1, 0b0000, 1, "vshr.u", NEONvshru>;
1136
1137// VSHLL : Vector Shift Left Long
1138def VSHLLs8 : N2VLSh<0, 1, 0b001000, 0b1010, 0, 0, 1, "vshll.s8",
1139 v8i16, v8i8, NEONvshlls>;
1140def VSHLLs16 : N2VLSh<0, 1, 0b010000, 0b1010, 0, 0, 1, "vshll.s16",
1141 v4i32, v4i16, NEONvshlls>;
1142def VSHLLs32 : N2VLSh<0, 1, 0b100000, 0b1010, 0, 0, 1, "vshll.s32",
1143 v2i64, v2i32, NEONvshlls>;
1144def VSHLLu8 : N2VLSh<1, 1, 0b001000, 0b1010, 0, 0, 1, "vshll.u8",
1145 v8i16, v8i8, NEONvshllu>;
1146def VSHLLu16 : N2VLSh<1, 1, 0b010000, 0b1010, 0, 0, 1, "vshll.u16",
1147 v4i32, v4i16, NEONvshllu>;
1148def VSHLLu32 : N2VLSh<1, 1, 0b100000, 0b1010, 0, 0, 1, "vshll.u32",
1149 v2i64, v2i32, NEONvshllu>;
1150
1151// VSHLL : Vector Shift Left Long (with maximum shift count)
1152def VSHLLi8 : N2VLSh<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll.i8",
1153 v8i16, v8i8, NEONvshlli>;
1154def VSHLLi16 : N2VLSh<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll.i16",
1155 v4i32, v4i16, NEONvshlli>;
1156def VSHLLi32 : N2VLSh<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll.i32",
1157 v2i64, v2i32, NEONvshlli>;

// VSHRN : Vector Shift Right and Narrow
def VSHRN16 : N2VNSh<0, 1, 0b001000, 0b1000, 0, 0, 1, "vshrn.i16",
                     v8i8, v8i16, NEONvshrn>;
def VSHRN32 : N2VNSh<0, 1, 0b010000, 0b1000, 0, 0, 1, "vshrn.i32",
                     v4i16, v4i32, NEONvshrn>;
def VSHRN64 : N2VNSh<0, 1, 0b100000, 0b1000, 0, 0, 1, "vshrn.i64",
                     v2i32, v2i64, NEONvshrn>;

// VRSHL : Vector Rounding Shift
defm VRSHLs : N3VInt_QHSD<0,0,0b0101,0, "vrshl.s", int_arm_neon_vrshifts, 0>;
defm VRSHLu : N3VInt_QHSD<1,0,0b0101,0, "vrshl.u", int_arm_neon_vrshiftu, 0>;
// VRSHR : Vector Rounding Shift Right
defm VRSHRs : N2VSh_QHSD<0, 1, 0b0010, 1, "vrshr.s", NEONvrshrs>;
defm VRSHRu : N2VSh_QHSD<1, 1, 0b0010, 1, "vrshr.u", NEONvrshru>;

// VRSHRN : Vector Rounding Shift Right and Narrow
def VRSHRN16 : N2VNSh<0, 1, 0b001000, 0b1000, 0, 1, 1, "vrshrn.i16",
                      v8i8, v8i16, NEONvrshrn>;
def VRSHRN32 : N2VNSh<0, 1, 0b010000, 0b1000, 0, 1, 1, "vrshrn.i32",
                      v4i16, v4i32, NEONvrshrn>;
def VRSHRN64 : N2VNSh<0, 1, 0b100000, 0b1000, 0, 1, 1, "vrshrn.i64",
                      v2i32, v2i64, NEONvrshrn>;

// VQSHL : Vector Saturating Shift
defm VQSHLs : N3VInt_QHSD<0,0,0b0100,1, "vqshl.s", int_arm_neon_vqshifts, 0>;
defm VQSHLu : N3VInt_QHSD<1,0,0b0100,1, "vqshl.u", int_arm_neon_vqshiftu, 0>;
// VQSHL : Vector Saturating Shift Left (Immediate)
defm VQSHLsi : N2VSh_QHSD<0, 1, 0b0111, 1, "vqshl.s", NEONvqshls>;
defm VQSHLui : N2VSh_QHSD<1, 1, 0b0111, 1, "vqshl.u", NEONvqshlu>;
// VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
defm VQSHLsu : N2VSh_QHSD<1, 1, 0b0110, 1, "vqshlu.s", NEONvqshlsu>;

// VQSHRN : Vector Saturating Shift Right and Narrow
def VQSHRNs16 : N2VNSh<0, 1, 0b001000, 0b1001, 0, 0, 1, "vqshrn.s16",
                       v8i8, v8i16, NEONvqshrns>;
def VQSHRNs32 : N2VNSh<0, 1, 0b010000, 0b1001, 0, 0, 1, "vqshrn.s32",
                       v4i16, v4i32, NEONvqshrns>;
def VQSHRNs64 : N2VNSh<0, 1, 0b100000, 0b1001, 0, 0, 1, "vqshrn.s64",
                       v2i32, v2i64, NEONvqshrns>;
def VQSHRNu16 : N2VNSh<1, 1, 0b001000, 0b1001, 0, 0, 1, "vqshrn.u16",
                       v8i8, v8i16, NEONvqshrnu>;
def VQSHRNu32 : N2VNSh<1, 1, 0b010000, 0b1001, 0, 0, 1, "vqshrn.u32",
                       v4i16, v4i32, NEONvqshrnu>;
def VQSHRNu64 : N2VNSh<1, 1, 0b100000, 0b1001, 0, 0, 1, "vqshrn.u64",
                       v2i32, v2i64, NEONvqshrnu>;

// VQSHRUN : Vector Saturating Shift Right and Narrow (Unsigned)
def VQSHRUN16 : N2VNSh<1, 1, 0b001000, 0b1000, 0, 0, 1, "vqshrun.s16",
                       v8i8, v8i16, NEONvqshrnsu>;
def VQSHRUN32 : N2VNSh<1, 1, 0b010000, 0b1000, 0, 0, 1, "vqshrun.s32",
                       v4i16, v4i32, NEONvqshrnsu>;
def VQSHRUN64 : N2VNSh<1, 1, 0b100000, 0b1000, 0, 0, 1, "vqshrun.s64",
                       v2i32, v2i64, NEONvqshrnsu>;
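
// A usage sketch (illustrative only): vqshrun takes signed wide elements
// and produces unsigned narrow elements, saturating negative values to 0:
//   vqshrun.s16 d0, q1, #4   @ signed 16-bit -> unsigned 8-bit, saturated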

// VQRSHL : Vector Saturating Rounding Shift
defm VQRSHLs : N3VInt_QHSD<0, 0, 0b0101, 1, "vqrshl.s",
                           int_arm_neon_vqrshifts, 0>;
defm VQRSHLu : N3VInt_QHSD<1, 0, 0b0101, 1, "vqrshl.u",
                           int_arm_neon_vqrshiftu, 0>;

// VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
def VQRSHRNs16: N2VNSh<0, 1, 0b001000, 0b1001, 0, 1, 1, "vqrshrn.s16",
                       v8i8, v8i16, NEONvqrshrns>;
def VQRSHRNs32: N2VNSh<0, 1, 0b010000, 0b1001, 0, 1, 1, "vqrshrn.s32",
                       v4i16, v4i32, NEONvqrshrns>;
def VQRSHRNs64: N2VNSh<0, 1, 0b100000, 0b1001, 0, 1, 1, "vqrshrn.s64",
                       v2i32, v2i64, NEONvqrshrns>;
def VQRSHRNu16: N2VNSh<1, 1, 0b001000, 0b1001, 0, 1, 1, "vqrshrn.u16",
                       v8i8, v8i16, NEONvqrshrnu>;
def VQRSHRNu32: N2VNSh<1, 1, 0b010000, 0b1001, 0, 1, 1, "vqrshrn.u32",
                       v4i16, v4i32, NEONvqrshrnu>;
def VQRSHRNu64: N2VNSh<1, 1, 0b100000, 0b1001, 0, 1, 1, "vqrshrn.u64",
                       v2i32, v2i64, NEONvqrshrnu>;

// VQRSHRUN : Vector Saturating Rounding Shift Right and Narrow (Unsigned)
def VQRSHRUN16: N2VNSh<1, 1, 0b001000, 0b1000, 0, 1, 1, "vqrshrun.s16",
                       v8i8, v8i16, NEONvqrshrnsu>;
def VQRSHRUN32: N2VNSh<1, 1, 0b010000, 0b1000, 0, 1, 1, "vqrshrun.s32",
                       v4i16, v4i32, NEONvqrshrnsu>;
def VQRSHRUN64: N2VNSh<1, 1, 0b100000, 0b1000, 0, 1, 1, "vqrshrun.s64",
                       v2i32, v2i64, NEONvqrshrnsu>;

// VSRA : Vector Shift Right and Accumulate
defm VSRAs : N2VShAdd_QHSD<0, 1, 0b0001, 1, "vsra.s", NEONvshrs>;
defm VSRAu : N2VShAdd_QHSD<1, 1, 0b0001, 1, "vsra.u", NEONvshru>;
// VRSRA : Vector Rounding Shift Right and Accumulate
defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra.s", NEONvrshrs>;
defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra.u", NEONvrshru>;

// VSLI : Vector Shift Left and Insert
defm VSLI : N2VShIns_QHSD<1, 1, 0b0101, 1, "vsli.", NEONvsli>;
// VSRI : Vector Shift Right and Insert
defm VSRI : N2VShIns_QHSD<1, 1, 0b0100, 1, "vsri.", NEONvsri>;
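
// A usage sketch (illustrative only): the insert forms merge the shifted
// source into the destination, keeping the destination bits that the
// shift leaves uncovered (per element):
//   vsli.32 d0, d1, #8       @ d0 = (d1 << 8) | (d0 & 0x000000ff)
//   vsri.32 d0, d1, #8       @ d0 = (d1 >> 8) | (d0 & 0xff000000)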

// Vector Absolute and Saturating Absolute.

// VABS : Vector Absolute Value
defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0, "vabs.s",
                       int_arm_neon_vabs>;
def VABSfd : N2VDInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0, "vabs.f32",
                     v2f32, v2f32, int_arm_neon_vabsf>;
def VABSfq : N2VQInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0, "vabs.f32",
                     v4f32, v4f32, int_arm_neon_vabsf>;

// VQABS : Vector Saturating Absolute Value
defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0, "vqabs.s",
                        int_arm_neon_vqabs>;

// Vector Negate.

def vneg : PatFrag<(ops node:$in), (sub immAllZerosV, node:$in)>;
def vneg_conv : PatFrag<(ops node:$in), (sub immAllZerosV_bc, node:$in)>;
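// These fragments match integer negation as a subtraction from an
// all-zeros vector; vneg_conv also covers the case where the zero vector
// has been bitconverted to another vector type (see the extra VNEG
// patterns below).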

class VNEGD<bits<2> size, string OpcodeStr, ValueType Ty>
  : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$dst), (ins DPR:$src),
        !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (Ty (vneg DPR:$src)))]>;
class VNEGQ<bits<2> size, string OpcodeStr, ValueType Ty>
  : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$dst), (ins QPR:$src),
        !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (Ty (vneg QPR:$src)))]>;

// VNEG : Vector Negate
def VNEGs8d : VNEGD<0b00, "vneg.s8", v8i8>;
def VNEGs16d : VNEGD<0b01, "vneg.s16", v4i16>;
def VNEGs32d : VNEGD<0b10, "vneg.s32", v2i32>;
def VNEGs8q : VNEGQ<0b00, "vneg.s8", v16i8>;
def VNEGs16q : VNEGQ<0b01, "vneg.s16", v8i16>;
def VNEGs32q : VNEGQ<0b10, "vneg.s32", v4i32>;

// VNEG : Vector Negate (floating-point)
def VNEGf32d : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
                   (outs DPR:$dst), (ins DPR:$src), "vneg.f32\t$dst, $src", "",
                   [(set DPR:$dst, (v2f32 (fneg DPR:$src)))]>;
def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
                   (outs QPR:$dst), (ins QPR:$src), "vneg.f32\t$dst, $src", "",
                   [(set QPR:$dst, (v4f32 (fneg QPR:$src)))]>;

def : Pat<(v8i8 (vneg_conv DPR:$src)), (VNEGs8d DPR:$src)>;
def : Pat<(v4i16 (vneg_conv DPR:$src)), (VNEGs16d DPR:$src)>;
def : Pat<(v2i32 (vneg_conv DPR:$src)), (VNEGs32d DPR:$src)>;
def : Pat<(v16i8 (vneg_conv QPR:$src)), (VNEGs8q QPR:$src)>;
def : Pat<(v8i16 (vneg_conv QPR:$src)), (VNEGs16q QPR:$src)>;
def : Pat<(v4i32 (vneg_conv QPR:$src)), (VNEGs32q QPR:$src)>;

// VQNEG : Vector Saturating Negate
defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0, "vqneg.s",
                        int_arm_neon_vqneg>;

// Vector Bit Counting Operations.

// VCLS : Vector Count Leading Sign Bits
defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0, "vcls.s",
                       int_arm_neon_vcls>;
// VCLZ : Vector Count Leading Zeros
defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0, "vclz.i",
                       int_arm_neon_vclz>;
// VCNT : Vector Count One Bits
def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0, "vcnt.8",
                    v8i8, v8i8, int_arm_neon_vcnt>;
def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0, "vcnt.8",
                    v16i8, v16i8, int_arm_neon_vcnt>;

// Vector Move Operations.

// VMOV : Vector Move (Register)

def VMOVD : N3V<0, 0, 0b10, 0b0001, 0, 1, (outs DPR:$dst), (ins DPR:$src),
                "vmov\t$dst, $src", "", []>;
def VMOVQ : N3V<0, 0, 0b10, 0b0001, 1, 1, (outs QPR:$dst), (ins QPR:$src),
                "vmov\t$dst, $src", "", []>;

// VMOV : Vector Move (Immediate)

// VMOV_get_imm8 xform function: convert build_vector to VMOV.i8 imm.
def VMOV_get_imm8 : SDNodeXForm<build_vector, [{
  return ARM::getVMOVImm(N, 1, *CurDAG);
}]>;
def vmovImm8 : PatLeaf<(build_vector), [{
  return ARM::getVMOVImm(N, 1, *CurDAG).getNode() != 0;
}], VMOV_get_imm8>;

// VMOV_get_imm16 xform function: convert build_vector to VMOV.i16 imm.
def VMOV_get_imm16 : SDNodeXForm<build_vector, [{
  return ARM::getVMOVImm(N, 2, *CurDAG);
}]>;
def vmovImm16 : PatLeaf<(build_vector), [{
  return ARM::getVMOVImm(N, 2, *CurDAG).getNode() != 0;
}], VMOV_get_imm16>;

// VMOV_get_imm32 xform function: convert build_vector to VMOV.i32 imm.
def VMOV_get_imm32 : SDNodeXForm<build_vector, [{
  return ARM::getVMOVImm(N, 4, *CurDAG);
}]>;
def vmovImm32 : PatLeaf<(build_vector), [{
  return ARM::getVMOVImm(N, 4, *CurDAG).getNode() != 0;
}], VMOV_get_imm32>;

// VMOV_get_imm64 xform function: convert build_vector to VMOV.i64 imm.
def VMOV_get_imm64 : SDNodeXForm<build_vector, [{
  return ARM::getVMOVImm(N, 8, *CurDAG);
}]>;
def vmovImm64 : PatLeaf<(build_vector), [{
  return ARM::getVMOVImm(N, 8, *CurDAG).getNode() != 0;
}], VMOV_get_imm64>;

// Note: Some of the cmode bits in the following VMOV instructions need to
// be encoded based on the immed values.
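// For example (illustrative only), a constant splat such as
//   vmov.i8  d0, #0x55
// or
//   vmov.i32 q0, #0x00ff0000
// is only selected when ARM::getVMOVImm decides the build_vector constant
// is representable in one of the VMOV immediate encodings.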

def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$dst),
                        (ins i8imm:$SIMM), "vmov.i8\t$dst, $SIMM", "",
                        [(set DPR:$dst, (v8i8 vmovImm8:$SIMM))]>;
def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$dst),
                         (ins i8imm:$SIMM), "vmov.i8\t$dst, $SIMM", "",
                         [(set QPR:$dst, (v16i8 vmovImm8:$SIMM))]>;

def VMOVv4i16 : N1ModImm<1, 0b000, 0b1000, 0, 0, 0, 1, (outs DPR:$dst),
                         (ins i16imm:$SIMM), "vmov.i16\t$dst, $SIMM", "",
                         [(set DPR:$dst, (v4i16 vmovImm16:$SIMM))]>;
def VMOVv8i16 : N1ModImm<1, 0b000, 0b1000, 0, 1, 0, 1, (outs QPR:$dst),
                         (ins i16imm:$SIMM), "vmov.i16\t$dst, $SIMM", "",
                         [(set QPR:$dst, (v8i16 vmovImm16:$SIMM))]>;

def VMOVv2i32 : N1ModImm<1, 0b000, 0b0000, 0, 0, 0, 1, (outs DPR:$dst),
                         (ins i32imm:$SIMM), "vmov.i32\t$dst, $SIMM", "",
                         [(set DPR:$dst, (v2i32 vmovImm32:$SIMM))]>;
def VMOVv4i32 : N1ModImm<1, 0b000, 0b0000, 0, 1, 0, 1, (outs QPR:$dst),
                         (ins i32imm:$SIMM), "vmov.i32\t$dst, $SIMM", "",
                         [(set QPR:$dst, (v4i32 vmovImm32:$SIMM))]>;

def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$dst),
                         (ins i64imm:$SIMM), "vmov.i64\t$dst, $SIMM", "",
                         [(set DPR:$dst, (v1i64 vmovImm64:$SIMM))]>;
def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$dst),
                         (ins i64imm:$SIMM), "vmov.i64\t$dst, $SIMM", "",
                         [(set QPR:$dst, (v2i64 vmovImm64:$SIMM))]>;

// VMOV : Vector Get Lane (move scalar to ARM core register)

def VGETLNs8 : NVGetLane<0b11100101, 0b1011, 0b00,
                         (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
                         "vmov", ".s8\t$dst, $src[$lane]",
                         [(set GPR:$dst, (NEONvgetlanes (v8i8 DPR:$src),
                                          imm:$lane))]>;
def VGETLNs16 : NVGetLane<0b11100001, 0b1011, 0b01,
                          (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
                          "vmov", ".s16\t$dst, $src[$lane]",
                          [(set GPR:$dst, (NEONvgetlanes (v4i16 DPR:$src),
                                           imm:$lane))]>;
def VGETLNu8 : NVGetLane<0b11101101, 0b1011, 0b00,
                         (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
                         "vmov", ".u8\t$dst, $src[$lane]",
                         [(set GPR:$dst, (NEONvgetlaneu (v8i8 DPR:$src),
                                          imm:$lane))]>;
def VGETLNu16 : NVGetLane<0b11101001, 0b1011, 0b01,
                          (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
                          "vmov", ".u16\t$dst, $src[$lane]",
                          [(set GPR:$dst, (NEONvgetlaneu (v4i16 DPR:$src),
                                           imm:$lane))]>;
def VGETLNi32 : NVGetLane<0b11100001, 0b1011, 0b00,
                          (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
                          "vmov", ".32\t$dst, $src[$lane]",
                          [(set GPR:$dst, (extractelt (v2i32 DPR:$src),
                                           imm:$lane))]>;
// def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
def : Pat<(NEONvgetlanes (v16i8 QPR:$src), imm:$lane),
          (VGETLNs8 (v8i8 (EXTRACT_SUBREG QPR:$src,
                                          (SubReg_i8_reg imm:$lane))),
                    (SubReg_i8_lane imm:$lane))>;
def : Pat<(NEONvgetlanes (v8i16 QPR:$src), imm:$lane),
          (VGETLNs16 (v4i16 (EXTRACT_SUBREG QPR:$src,
                                            (SubReg_i16_reg imm:$lane))),
                     (SubReg_i16_lane imm:$lane))>;
def : Pat<(NEONvgetlaneu (v16i8 QPR:$src), imm:$lane),
          (VGETLNu8 (v8i8 (EXTRACT_SUBREG QPR:$src,
                                          (SubReg_i8_reg imm:$lane))),
                    (SubReg_i8_lane imm:$lane))>;
def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane),
          (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
                                            (SubReg_i16_reg imm:$lane))),
                     (SubReg_i16_lane imm:$lane))>;
def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
          (VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src,
                                            (SubReg_i32_reg imm:$lane))),
                     (SubReg_i32_lane imm:$lane))>;
//def : Pat<(extractelt (v2i64 QPR:$src1), imm:$src2),
//          (EXTRACT_SUBREG QPR:$src1, (SubReg_f64_reg imm:$src2))>;
def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
          (EXTRACT_SUBREG QPR:$src1, (SubReg_f64_reg imm:$src2))>;
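
// Note (illustrative): the Q-register patterns above split a 128-bit lane
// index into a D subregister (SubReg_*_reg) and a lane within that D
// register (SubReg_*_lane); e.g. lane 9 of a v16i8 Q register is lane 1
// of its upper D subregister.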


// VMOV : Vector Set Lane (move ARM core register to scalar)

let Constraints = "$src1 = $dst" in {
def VSETLNi8 : NVSetLane<0b11100100, 0b1011, 0b00, (outs DPR:$dst),
                         (ins DPR:$src1, GPR:$src2, i32imm:$lane),
                         "vmov", ".8\t$dst[$lane], $src2",
                         [(set DPR:$dst, (vector_insert (v8i8 DPR:$src1),
                                          GPR:$src2, imm:$lane))]>;
def VSETLNi16 : NVSetLane<0b11100000, 0b1011, 0b01, (outs DPR:$dst),
                          (ins DPR:$src1, GPR:$src2, i32imm:$lane),
                          "vmov", ".16\t$dst[$lane], $src2",
                          [(set DPR:$dst, (vector_insert (v4i16 DPR:$src1),
                                           GPR:$src2, imm:$lane))]>;
def VSETLNi32 : NVSetLane<0b11100000, 0b1011, 0b00, (outs DPR:$dst),
                          (ins DPR:$src1, GPR:$src2, i32imm:$lane),
                          "vmov", ".32\t$dst[$lane], $src2",
                          [(set DPR:$dst, (insertelt (v2i32 DPR:$src1),
                                           GPR:$src2, imm:$lane))]>;
}
def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
          (v16i8 (INSERT_SUBREG QPR:$src1,
                  (VSETLNi8 (v8i8 (EXTRACT_SUBREG QPR:$src1,
                                   (SubReg_i8_reg imm:$lane))),
                            GPR:$src2, (SubReg_i8_lane imm:$lane)),
                  (SubReg_i8_reg imm:$lane)))>;
def : Pat<(vector_insert (v8i16 QPR:$src1), GPR:$src2, imm:$lane),
          (v8i16 (INSERT_SUBREG QPR:$src1,
                  (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
                                     (SubReg_i16_reg imm:$lane))),
                             GPR:$src2, (SubReg_i16_lane imm:$lane)),
                  (SubReg_i16_reg imm:$lane)))>;
def : Pat<(insertelt (v4i32 QPR:$src1), GPR:$src2, imm:$lane),
          (v4i32 (INSERT_SUBREG QPR:$src1,
                  (VSETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src1,
                                     (SubReg_i32_reg imm:$lane))),
                             GPR:$src2, (SubReg_i32_lane imm:$lane)),
                  (SubReg_i32_reg imm:$lane)))>;

//def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
//          (INSERT_SUBREG QPR:$src1, DPR:$src2, (SubReg_f64_reg imm:$src3))>;
def : Pat<(v2f64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
          (INSERT_SUBREG QPR:$src1, DPR:$src2, (SubReg_f64_reg imm:$src3))>;

// VDUP : Vector Duplicate (from ARM core register to all elements)

def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;

class VDUPD<bits<8> opcod1, bits<2> opcod3, string asmSize, ValueType Ty>
  : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$dst), (ins GPR:$src),
          "vdup", !strconcat(asmSize, "\t$dst, $src"),
          [(set DPR:$dst, (Ty (splat_lo (scalar_to_vector GPR:$src), undef)))]>;
class VDUPQ<bits<8> opcod1, bits<2> opcod3, string asmSize, ValueType Ty>
  : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$dst), (ins GPR:$src),
          "vdup", !strconcat(asmSize, "\t$dst, $src"),
          [(set QPR:$dst, (Ty (splat_lo (scalar_to_vector GPR:$src), undef)))]>;

def VDUP8d : VDUPD<0b11101100, 0b00, ".8", v8i8>;
def VDUP16d : VDUPD<0b11101000, 0b01, ".16", v4i16>;
def VDUP32d : VDUPD<0b11101000, 0b00, ".32", v2i32>;
def VDUP8q : VDUPQ<0b11101110, 0b00, ".8", v16i8>;
def VDUP16q : VDUPQ<0b11101010, 0b01, ".16", v8i16>;
def VDUP32q : VDUPQ<0b11101010, 0b00, ".32", v4i32>;

def VDUPfd : NVDup<0b11101000, 0b1011, 0b00, (outs DPR:$dst), (ins GPR:$src),
                   "vdup", ".32\t$dst, $src",
                   [(set DPR:$dst, (v2f32 (splat_lo
                                            (scalar_to_vector
                                              (f32 (bitconvert GPR:$src))),
                                            undef)))]>;
def VDUPfq : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$dst), (ins GPR:$src),
                   "vdup", ".32\t$dst, $src",
                   [(set QPR:$dst, (v4f32 (splat_lo
                                            (scalar_to_vector
                                              (f32 (bitconvert GPR:$src))),
                                            undef)))]>;
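
// A usage sketch (illustrative only):
//   vdup.32 q0, r1           @ replicate r1 into all four 32-bit lanes
// The splat_lo fragment above matches a shuffle that splats lane 0 of a
// scalar_to_vector node, which is how such a duplication typically
// reaches instruction selection.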

// VDUP : Vector Duplicate Lane (from scalar to all elements)

def SHUFFLE_get_splat_lane : SDNodeXForm<vector_shuffle, [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return CurDAG->getTargetConstant(SVOp->getSplatIndex(), MVT::i32);
}]>;

def splat_lane : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat();
}], SHUFFLE_get_splat_lane>;

class VDUPLND<bits<2> op19_18, bits<2> op17_16, string OpcodeStr, ValueType Ty>
  : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 0, 0,
        (outs DPR:$dst), (ins DPR:$src, i32imm:$lane),
        !strconcat(OpcodeStr, "\t$dst, $src[$lane]"), "",
        [(set DPR:$dst, (Ty (splat_lane:$lane DPR:$src, undef)))]>;

// vector_shuffle requires that the source and destination types match, so
// VDUP to a 128-bit result uses a target-specific VDUPLANEQ node.
class VDUPLNQ<bits<2> op19_18, bits<2> op17_16, string OpcodeStr,
              ValueType ResTy, ValueType OpTy>
  : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 1, 0,
        (outs QPR:$dst), (ins DPR:$src, i32imm:$lane),
        !strconcat(OpcodeStr, "\t$dst, $src[$lane]"), "",
        [(set QPR:$dst, (ResTy (NEONvduplaneq (OpTy DPR:$src), imm:$lane)))]>;

def VDUPLN8d : VDUPLND<0b00, 0b01, "vdup.8", v8i8>;
def VDUPLN16d : VDUPLND<0b00, 0b10, "vdup.16", v4i16>;
def VDUPLN32d : VDUPLND<0b01, 0b00, "vdup.32", v2i32>;
def VDUPLNfd : VDUPLND<0b01, 0b00, "vdup.32", v2f32>;
def VDUPLN8q : VDUPLNQ<0b00, 0b01, "vdup.8", v16i8, v8i8>;
def VDUPLN16q : VDUPLNQ<0b00, 0b10, "vdup.16", v8i16, v4i16>;
def VDUPLN32q : VDUPLNQ<0b01, 0b00, "vdup.32", v4i32, v2i32>;
def VDUPLNfq : VDUPLNQ<0b01, 0b00, "vdup.32", v4f32, v2f32>;
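
// A usage sketch (illustrative only):
//   vdup.16 d0, d1[2]        @ 64-bit result, matched via splat_lane
//   vdup.16 q0, d1[2]        @ 128-bit result, matched via NEONvduplaneq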

// VMOVN : Vector Narrowing Move
defm VMOVN : N2VNInt_HSD<0b11,0b11,0b10,0b00100,0,0, "vmovn.i",
                         int_arm_neon_vmovn>;
// VQMOVN : Vector Saturating Narrowing Move
defm VQMOVNs : N2VNInt_HSD<0b11,0b11,0b10,0b00101,0,0, "vqmovn.s",
                           int_arm_neon_vqmovns>;
defm VQMOVNu : N2VNInt_HSD<0b11,0b11,0b10,0b00101,1,0, "vqmovn.u",
                           int_arm_neon_vqmovnu>;
defm VQMOVNsu : N2VNInt_HSD<0b11,0b11,0b10,0b00100,1,0, "vqmovun.s",
                            int_arm_neon_vqmovnsu>;
// VMOVL : Vector Lengthening Move
defm VMOVLs : N2VLInt_QHS<0,1,0b1010,0,0,1, "vmovl.s", int_arm_neon_vmovls>;
defm VMOVLu : N2VLInt_QHS<1,1,0b1010,0,0,1, "vmovl.u", int_arm_neon_vmovlu>;

// Vector Conversions.

// VCVT : Vector Convert Between Floating-Point and Integers
def VCVTf2sd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt.s32.f32",
                    v2i32, v2f32, fp_to_sint>;
def VCVTf2ud : N2VD<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt.u32.f32",
                    v2i32, v2f32, fp_to_uint>;
def VCVTs2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt.f32.s32",
                    v2f32, v2i32, sint_to_fp>;
def VCVTu2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt.f32.u32",
                    v2f32, v2i32, uint_to_fp>;

def VCVTf2sq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt.s32.f32",
                    v4i32, v4f32, fp_to_sint>;
def VCVTf2uq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt.u32.f32",
                    v4i32, v4f32, fp_to_uint>;
def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt.f32.s32",
                    v4f32, v4i32, sint_to_fp>;
def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt.f32.u32",
                    v4f32, v4i32, uint_to_fp>;

// VCVT : Vector Convert Between Floating-Point and Fixed-Point.
// Note: Some of the opcode bits in the following VCVT instructions need to
// be encoded based on the immed values.
def VCVTf2xsd : N2VCvtD<0, 1, 0b000000, 0b1111, 0, 1, "vcvt.s32.f32",
                        v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
def VCVTf2xud : N2VCvtD<1, 1, 0b000000, 0b1111, 0, 1, "vcvt.u32.f32",
                        v2i32, v2f32, int_arm_neon_vcvtfp2fxu>;
def VCVTxs2fd : N2VCvtD<0, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.s32",
                        v2f32, v2i32, int_arm_neon_vcvtfxs2fp>;
def VCVTxu2fd : N2VCvtD<1, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.u32",
                        v2f32, v2i32, int_arm_neon_vcvtfxu2fp>;

def VCVTf2xsq : N2VCvtQ<0, 1, 0b000000, 0b1111, 0, 1, "vcvt.s32.f32",
                        v4i32, v4f32, int_arm_neon_vcvtfp2fxs>;
def VCVTf2xuq : N2VCvtQ<1, 1, 0b000000, 0b1111, 0, 1, "vcvt.u32.f32",
                        v4i32, v4f32, int_arm_neon_vcvtfp2fxu>;
def VCVTxs2fq : N2VCvtQ<0, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.s32",
                        v4f32, v4i32, int_arm_neon_vcvtfxs2fp>;
def VCVTxu2fq : N2VCvtQ<1, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.u32",
                        v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
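
// A usage sketch (illustrative only): the fixed-point forms take the
// number of fraction bits as an immediate operand, which is what the
// "encoded based on the immed values" note above refers to:
//   vcvt.s32.f32 d0, d1, #16   @ float -> signed fixed point, 16 fraction bits
//   vcvt.f32.u32 q0, q1, #8    @ unsigned fixed point, 8 fraction bits -> float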

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// bit_convert: reinterpreting a D or Q register as another vector type of
// the same size is a no-op, so these patterns just reuse the source register.
def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8 DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v1i64 (bitconvert (f64 DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8 DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (f64 DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v2i32 (bitconvert (v2f32 DPR:$src))), (v2i32 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8 DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (f64 DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
def : Pat<(v8i8 (bitconvert (v1i64 DPR:$src))), (v8i8 DPR:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 DPR:$src))), (v8i8 DPR:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 DPR:$src))), (v8i8 DPR:$src)>;
def : Pat<(v8i8 (bitconvert (f64 DPR:$src))), (v8i8 DPR:$src)>;
def : Pat<(v8i8 (bitconvert (v2f32 DPR:$src))), (v8i8 DPR:$src)>;
def : Pat<(f64 (bitconvert (v1i64 DPR:$src))), (f64 DPR:$src)>;
def : Pat<(f64 (bitconvert (v2i32 DPR:$src))), (f64 DPR:$src)>;
def : Pat<(f64 (bitconvert (v4i16 DPR:$src))), (f64 DPR:$src)>;
def : Pat<(f64 (bitconvert (v8i8 DPR:$src))), (f64 DPR:$src)>;
def : Pat<(f64 (bitconvert (v2f32 DPR:$src))), (f64 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (f64 DPR:$src))), (v2f32 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (v2i32 DPR:$src))), (v2f32 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8 DPR:$src))), (v2f32 DPR:$src)>;

def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 QPR:$src))), (v4i32 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v2i64 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;