blob: 83ebf2884a50300ef27387e6e355cc9afc6b74be [file] [log] [blame]
//===-- PPCInstrAltivec.td - The PowerPC Altivec Extension -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//
13
// *********************************** NOTE ***********************************
// ** For POWER8 Little Endian, the VSX swap optimization relies on knowing  **
// ** which VMX and VSX instructions are lane-sensitive and which are not.   **
// ** A lane-sensitive instruction relies, implicitly or explicitly, on      **
// ** whether lanes are numbered from left to right.  An instruction like    **
// ** VADDFP is not lane-sensitive, because each lane of the result vector   **
// ** relies only on the corresponding lane of the source vectors.  However, **
// ** an instruction like VMULESB is lane-sensitive, because "even" and      **
// ** "odd" lanes are different for big-endian and little-endian numbering.  **
// **                                                                        **
// ** When adding new VMX and VSX instructions, please consider whether they **
// ** are lane-sensitive.  If so, they must be added to a switch statement   **
// ** in PPCVSXSwapRemoval::gatherVectorInstructions().                      **
// ****************************************************************************

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//

// Build-vectors are canonicalized to v16i8, so the all-ones "-1" operand of
// every vnot will be of that type.
def vnot_ppc : PatFrag<(ops node:$in),
                       (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
Chris Lattnere8b83b42006-04-06 17:23:16 +000037
// Shuffle predicates for the VPKU{H,W,D}UM pack instructions.  The second
// argument to isVPKU*UMShuffleMask selects the variant:
//   0 = two distinct inputs, 1 = unary (both inputs the same value).
def vpkuhum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
}]>;
def vpkuwum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
}]>;
def vpkudum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUDUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
}]>;
def vpkuhum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 1, *CurDAG);
}]>;
def vpkuwum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 1, *CurDAG);
}]>;
def vpkudum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUDUMShuffleMask(cast<ShuffleVectorSDNode>(N), 1, *CurDAG);
}]>;
Chris Lattnera4bbfae2006-04-06 22:28:36 +000062
// These fragments are provided for little-endian, where the inputs must be
// swapped for correct semantics (mask-check mode 2).
def vpkuhum_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                      (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 2, *CurDAG);
}]>;
def vpkuwum_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                      (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 2, *CurDAG);
}]>;
def vpkudum_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                      (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUDUMShuffleMask(cast<ShuffleVectorSDNode>(N), 2, *CurDAG);
}]>;
Chris Lattnera4bbfae2006-04-06 22:28:36 +000077
// Shuffle predicates for the VMRGL*/VMRGH* merge instructions.  The arguments
// to isVMRG[LH]ShuffleMask are the element width in bytes (1, 2 or 4) and the
// variant: 0 = two distinct inputs, 1 = unary (both inputs the same value).
def vmrglb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 0, *CurDAG);
}]>;
def vmrglh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 0, *CurDAG);
}]>;
def vmrglw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 0, *CurDAG);
}]>;
def vmrghb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 0, *CurDAG);
}]>;
def vmrghh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 0, *CurDAG);
}]>;
def vmrghw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 0, *CurDAG);
}]>;


def vmrglb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 1, *CurDAG);
}]>;
def vmrglh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 1, *CurDAG);
}]>;
def vmrglw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 1, *CurDAG);
}]>;
def vmrghb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 1, *CurDAG);
}]>;
def vmrghh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 1, *CurDAG);
}]>;
def vmrghw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 1, *CurDAG);
}]>;
128
129
// These fragments are provided for little-endian, where the inputs must be
// swapped for correct semantics (mask-check mode 2).
def vmrglb_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 2, *CurDAG);
}]>;
def vmrglh_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 2, *CurDAG);
}]>;
def vmrglw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 2, *CurDAG);
}]>;
def vmrghb_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 2, *CurDAG);
}]>;
def vmrghh_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 2, *CurDAG);
}]>;
def vmrghw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 2, *CurDAG);
}]>;
156
Nate Begeman8d6d4b92009-04-27 18:41:29 +0000157
// Shuffle predicates for the VMRGEW/VMRGOW even/odd word merges.  The boolean
// selects even (true) or odd (false) words; the final integer selects the
// variant: 0 = two inputs, 1 = unary, 2 = little-endian with swapped inputs.
def vmrgew_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGEOShuffleMask(cast<ShuffleVectorSDNode>(N), true, 0, *CurDAG);
}]>;
def vmrgow_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGEOShuffleMask(cast<ShuffleVectorSDNode>(N), false, 0, *CurDAG);
}]>;
def vmrgew_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGEOShuffleMask(cast<ShuffleVectorSDNode>(N), true, 1, *CurDAG);
}]>;
def vmrgow_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGEOShuffleMask(cast<ShuffleVectorSDNode>(N), false, 1, *CurDAG);
}]>;
def vmrgew_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGEOShuffleMask(cast<ShuffleVectorSDNode>(N), true, 2, *CurDAG);
}]>;
def vmrgow_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGEOShuffleMask(cast<ShuffleVectorSDNode>(N), false, 2, *CurDAG);
}]>;
182
183
184
// vsldoi: match a two-input shift-left-double shuffle and encode the byte
// shift amount as the instruction immediate.
def VSLDOI_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::isVSLDOIShuffleMask(N, 0, *CurDAG), SDLoc(N));
}]>;
def vsldoi_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVSLDOIShuffleMask(N, 0, *CurDAG) != -1;
}], VSLDOI_get_imm>;


/// VSLDOI_unary* - These are used to match vsldoi(X,X), which is turned into
/// vector_shuffle(X,undef,mask) by the dag combiner.
def VSLDOI_unary_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::isVSLDOIShuffleMask(N, 1, *CurDAG), SDLoc(N));
}]>;
def vsldoi_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVSLDOIShuffleMask(N, 1, *CurDAG) != -1;
}], VSLDOI_unary_get_imm>;
Chris Lattner1d338192006-04-06 18:26:28 +0000203
204
/// VSLDOI_swapped* - These fragments are provided for little-endian, where
/// the inputs must be swapped for correct semantics.
def VSLDOI_swapped_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::isVSLDOIShuffleMask(N, 2, *CurDAG), SDLoc(N));
}]>;
// BUG FIX: the predicate checks the swapped (mode 2) mask, so the xform must
// also use mode 2.  The previous code attached VSLDOI_get_imm (mode 0), which
// yields the wrong shift amount (-1) for masks that only match swapped form.
def vsldoi_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVSLDOIShuffleMask(N, 2, *CurDAG) != -1;
}], VSLDOI_swapped_get_imm>;
214
215
// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm
// for splats of 1-, 2- and 4-byte elements.
def VSPLTB_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 1, *CurDAG), SDLoc(N));
}]>;
def vspltb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 1);
}], VSPLTB_get_imm>;
def VSPLTH_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 2, *CurDAG), SDLoc(N));
}]>;
def vsplth_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 2);
}], VSPLTH_get_imm>;
def VSPLTW_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 4, *CurDAG), SDLoc(N));
}]>;
def vspltw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 4);
}], VSPLTW_get_imm>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000238
Chris Lattner2a85fa12006-03-25 07:51:43 +0000239
// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  return PPC::get_VSPLTI_elt(N, 1, *CurDAG);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::get_VSPLTI_elt(N, 1, *CurDAG).getNode() != 0;
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  return PPC::get_VSPLTI_elt(N, 2, *CurDAG);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::get_VSPLTI_elt(N, 2, *CurDAG).getNode() != 0;
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  return PPC::get_VSPLTI_elt(N, 4, *CurDAG);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != 0;
}], VSPLTISW_get_imm>;
263
//===----------------------------------------------------------------------===//
// Helpers for defining instructions that directly correspond to intrinsics.

// VA1a_Int_Ty - A VAForm_1a intrinsic definition of specific type.
class VA1a_Int_Ty<bits<6> xo, string opc, Intrinsic IntID, ValueType Ty>
  : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
              !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
              [(set Ty:$vD, (IntID Ty:$vA, Ty:$vB, Ty:$vC))]>;

// VA1a_Int_Ty2 - A VAForm_1a intrinsic definition where the type of the
// inputs doesn't match the type of the output.
class VA1a_Int_Ty2<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy,
                   ValueType InTy>
  : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
              !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
              [(set OutTy:$vD, (IntID InTy:$vA, InTy:$vB, InTy:$vC))]>;

// VA1a_Int_Ty3 - A VAForm_1a intrinsic definition where there are two
// input types and an output type.
class VA1a_Int_Ty3<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy,
                   ValueType In1Ty, ValueType In2Ty>
  : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
              !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
              [(set OutTy:$vD,
                    (IntID In1Ty:$vA, In1Ty:$vB, In2Ty:$vC))]>;
289
// VX1_Int_Ty - A VXForm_1 intrinsic definition of specific type.
class VX1_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty>
  : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
             [(set Ty:$vD, (IntID Ty:$vA, Ty:$vB))]>;

// VX1_Int_Ty2 - A VXForm_1 intrinsic definition where the type of the
// inputs doesn't match the type of the output.
class VX1_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
                  ValueType InTy>
  : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
             [(set OutTy:$vD, (IntID InTy:$vA, InTy:$vB))]>;

// VX1_Int_Ty3 - A VXForm_1 intrinsic definition where there are two
// input types and an output type.
class VX1_Int_Ty3<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
                  ValueType In1Ty, ValueType In2Ty>
  : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
             [(set OutTy:$vD, (IntID In1Ty:$vA, In2Ty:$vB))]>;
311
// VX2_Int_SP - A VXForm_2 intrinsic definition of vector single-precision type.
class VX2_Int_SP<bits<11> xo, string opc, Intrinsic IntID>
  : VXForm_2<xo, (outs vrrc:$vD), (ins vrrc:$vB),
             !strconcat(opc, " $vD, $vB"), IIC_VecFP,
             [(set v4f32:$vD, (IntID v4f32:$vB))]>;

// VX2_Int_Ty2 - A VXForm_2 intrinsic definition where the type of the
// inputs doesn't match the type of the output.
class VX2_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
                  ValueType InTy>
  : VXForm_2<xo, (outs vrrc:$vD), (ins vrrc:$vB),
             !strconcat(opc, " $vD, $vB"), IIC_VecFP,
             [(set OutTy:$vD, (IntID InTy:$vB))]>;

// VXBX_Int_Ty - A VXForm_BX intrinsic definition of specific type.
class VXBX_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty>
  : VXForm_BX<xo, (outs vrrc:$vD), (ins vrrc:$vA),
              !strconcat(opc, " $vD, $vA"), IIC_VecFP,
              [(set Ty:$vD, (IntID Ty:$vA))]>;

// VXCR_Int_Ty - A VXForm_CR intrinsic definition of specific type.
class VXCR_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty>
  : VXForm_CR<xo, (outs vrrc:$vD), (ins vrrc:$vA, u1imm:$ST, u4imm:$SIX),
              !strconcat(opc, " $vD, $vA, $ST, $SIX"), IIC_VecFP,
              [(set Ty:$vD, (IntID Ty:$vA, imm:$ST, imm:$SIX))]>;
335
//===----------------------------------------------------------------------===//
// Instruction Definitions.

// Predicate gating all definitions below on subtarget AltiVec support.
def HasAltivec : Predicate<"PPCSubTarget->hasAltivec()">;
let Predicates = [HasAltivec] in {
341
// Data-stream touch (prefetch hint) instructions.  All are marked
// Deprecated<DeprecatedDST>; they are no-ops on modern cores but still
// accepted by the assembler.
def DSS : DSS_Form<0, 822, (outs), (ins u5imm:$STRM),
                   "dss $STRM", IIC_LdStLoad /*FIXME*/, [(int_ppc_altivec_dss imm:$STRM)]>,
                   Deprecated<DeprecatedDST> {
  let A = 0;
  let B = 0;
}

def DSSALL : DSS_Form<1, 822, (outs), (ins),
                      "dssall", IIC_LdStLoad /*FIXME*/, [(int_ppc_altivec_dssall)]>,
                      Deprecated<DeprecatedDST> {
  let STRM = 0;
  let A = 0;
  let B = 0;
}

def DST : DSS_Form<0, 342, (outs), (ins u5imm:$STRM, gprc:$rA, gprc:$rB),
                   "dst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                   [(int_ppc_altivec_dst i32:$rA, i32:$rB, imm:$STRM)]>,
                   Deprecated<DeprecatedDST>;

def DSTT : DSS_Form<1, 342, (outs), (ins u5imm:$STRM, gprc:$rA, gprc:$rB),
                    "dstt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                    [(int_ppc_altivec_dstt i32:$rA, i32:$rB, imm:$STRM)]>,
                    Deprecated<DeprecatedDST>;

def DSTST : DSS_Form<0, 374, (outs), (ins u5imm:$STRM, gprc:$rA, gprc:$rB),
                     "dstst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                     [(int_ppc_altivec_dstst i32:$rA, i32:$rB, imm:$STRM)]>,
                     Deprecated<DeprecatedDST>;

def DSTSTT : DSS_Form<1, 374, (outs), (ins u5imm:$STRM, gprc:$rA, gprc:$rB),
                      "dststt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                      [(int_ppc_altivec_dststt i32:$rA, i32:$rB, imm:$STRM)]>,
                      Deprecated<DeprecatedDST>;

let isCodeGenOnly = 1 in {
  // The very same instructions as above, but formally matching 64bit registers.
  def DST64 : DSS_Form<0, 342, (outs), (ins u5imm:$STRM, g8rc:$rA, gprc:$rB),
                       "dst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                       [(int_ppc_altivec_dst i64:$rA, i32:$rB, imm:$STRM)]>,
                       Deprecated<DeprecatedDST>;

  def DSTT64 : DSS_Form<1, 342, (outs), (ins u5imm:$STRM, g8rc:$rA, gprc:$rB),
                        "dstt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                        [(int_ppc_altivec_dstt i64:$rA, i32:$rB, imm:$STRM)]>,
                        Deprecated<DeprecatedDST>;

  def DSTST64 : DSS_Form<0, 374, (outs), (ins u5imm:$STRM, g8rc:$rA, gprc:$rB),
                         "dstst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                         [(int_ppc_altivec_dstst i64:$rA, i32:$rB,
                                                 imm:$STRM)]>,
                         Deprecated<DeprecatedDST>;

  def DSTSTT64 : DSS_Form<1, 374, (outs), (ins u5imm:$STRM, g8rc:$rA, gprc:$rB),
                          "dststt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                          [(int_ppc_altivec_dststt i64:$rA, i32:$rB,
                                                   imm:$STRM)]>,
                          Deprecated<DeprecatedDST>;
}
Chris Lattnerc94d9322006-04-05 22:27:14 +0000401
// Move from/to the vector status and control register.
def MFVSCR : VXForm_4<1540, (outs vrrc:$vD), (ins),
                      "mfvscr $vD", IIC_LdStStore,
                      [(set v8i16:$vD, (int_ppc_altivec_mfvscr))]>;
def MTVSCR : VXForm_5<1604, (outs), (ins vrrc:$vB),
                      "mtvscr $vB", IIC_LdStLoad,
                      [(int_ppc_altivec_mtvscr v4i32:$vB)]>;
Chris Lattner5a528e52006-04-05 00:03:57 +0000408
let PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (outs vrrc:$vD), (ins memrr:$src),
                   "lvebx $vD, $src", IIC_LdStLoad,
                   [(set v16i8:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31,  39, (outs vrrc:$vD), (ins memrr:$src),
                   "lvehx $vD, $src", IIC_LdStLoad,
                   [(set v8i16:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31,  71, (outs vrrc:$vD), (ins memrr:$src),
                   "lvewx $vD, $src", IIC_LdStLoad,
                   [(set v4i32:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX  : XForm_1<31, 103, (outs vrrc:$vD), (ins memrr:$src),
                   "lvx $vD, $src", IIC_LdStLoad,
                   [(set v4i32:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (outs vrrc:$vD), (ins memrr:$src),
                   "lvxl $vD, $src", IIC_LdStLoad,
                   [(set v4i32:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}
426
// Load-vector-for-shift-left/right: produce permute control vectors.
def LVSL : XForm_1<31,  6, (outs vrrc:$vD), (ins memrr:$src),
                   "lvsl $vD, $src", IIC_LdStLoad,
                   [(set v16i8:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
                   PPC970_Unit_LSU;
def LVSR : XForm_1<31, 38, (outs vrrc:$vD), (ins memrr:$src),
                   "lvsr $vD, $src", IIC_LdStLoad,
                   [(set v16i8:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
                   PPC970_Unit_LSU;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000435
let PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvebx $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvebx v16i8:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvehx $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvehx v8i16:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvewx $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvewx v4i32:$rS, xoaddr:$dst)]>;
def STVX  : XForm_8<31, 231, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvx $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvx v4i32:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvxl $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvxl v4i32:$rS, xoaddr:$dst)]>;
}
453
let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
let isCommutable = 1 in {
def VMADDFP : VAForm_1<46, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vC, vrrc:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", IIC_VecFP,
                       [(set v4f32:$vD,
                             (fma v4f32:$vA, v4f32:$vC, v4f32:$vB))]>;

// FIXME: The fma+fneg pattern won't match because fneg is not legal.
def VNMSUBFP: VAForm_1<47, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vC, vrrc:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", IIC_VecFP,
                       [(set v4f32:$vD, (fneg (fma v4f32:$vA, v4f32:$vC,
                                                   (fneg v4f32:$vB))))]>;

def VMHADDSHS  : VA1a_Int_Ty<32, "vmhaddshs", int_ppc_altivec_vmhaddshs, v8i16>;
def VMHRADDSHS : VA1a_Int_Ty<33, "vmhraddshs", int_ppc_altivec_vmhraddshs,
                             v8i16>;
def VMLADDUHM  : VA1a_Int_Ty<34, "vmladduhm", int_ppc_altivec_vmladduhm, v8i16>;
} // isCommutable

def VPERM : VA1a_Int_Ty3<43, "vperm", int_ppc_altivec_vperm,
                         v4i32, v4i32, v16i8>;
def VSEL  : VA1a_Int_Ty<42, "vsel", int_ppc_altivec_vsel, v4i32>;
Chris Lattnere7fd4b02006-03-31 20:00:35 +0000477
Chris Lattner1d338192006-04-06 18:26:28 +0000478// Shuffles.
Ulrich Weigand136ac222013-04-26 16:53:15 +0000479def VSLDOI : VAForm_2<44, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, u5imm:$SH),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000480 "vsldoi $vD, $vA, $vB, $SH", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000481 [(set v16i8:$vD,
482 (vsldoi_shuffle:$SH v16i8:$vA, v16i8:$vB))]>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000483
484// VX-Form instructions. AltiVec arithmetic ops.
Hal Finkele01d3212014-03-24 15:07:28 +0000485let isCommutable = 1 in {
Ulrich Weigand136ac222013-04-26 16:53:15 +0000486def VADDFP : VXForm_1<10, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000487 "vaddfp $vD, $vA, $vB", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000488 [(set v4f32:$vD, (fadd v4f32:$vA, v4f32:$vB))]>;
Chris Lattnerc6c88b22006-03-26 02:39:02 +0000489
Ulrich Weigand136ac222013-04-26 16:53:15 +0000490def VADDUBM : VXForm_1<0, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000491 "vaddubm $vD, $vA, $vB", IIC_VecGeneral,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000492 [(set v16i8:$vD, (add v16i8:$vA, v16i8:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000493def VADDUHM : VXForm_1<64, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000494 "vadduhm $vD, $vA, $vB", IIC_VecGeneral,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000495 [(set v8i16:$vD, (add v8i16:$vA, v8i16:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000496def VADDUWM : VXForm_1<128, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000497 "vadduwm $vD, $vA, $vB", IIC_VecGeneral,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000498 [(set v4i32:$vD, (add v4i32:$vA, v4i32:$vB))]>;
Chris Lattnerc6c88b22006-03-26 02:39:02 +0000499
Bill Schmidt74b2e722013-03-28 19:27:24 +0000500def VADDCUW : VX1_Int_Ty<384, "vaddcuw", int_ppc_altivec_vaddcuw, v4i32>;
501def VADDSBS : VX1_Int_Ty<768, "vaddsbs", int_ppc_altivec_vaddsbs, v16i8>;
502def VADDSHS : VX1_Int_Ty<832, "vaddshs", int_ppc_altivec_vaddshs, v8i16>;
503def VADDSWS : VX1_Int_Ty<896, "vaddsws", int_ppc_altivec_vaddsws, v4i32>;
504def VADDUBS : VX1_Int_Ty<512, "vaddubs", int_ppc_altivec_vaddubs, v16i8>;
505def VADDUHS : VX1_Int_Ty<576, "vadduhs", int_ppc_altivec_vadduhs, v8i16>;
506def VADDUWS : VX1_Int_Ty<640, "vadduws", int_ppc_altivec_vadduws, v4i32>;
Hal Finkele01d3212014-03-24 15:07:28 +0000507} // isCommutable
508
509let isCommutable = 1 in
Ulrich Weigand136ac222013-04-26 16:53:15 +0000510def VAND : VXForm_1<1028, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000511 "vand $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000512 [(set v4i32:$vD, (and v4i32:$vA, v4i32:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000513def VANDC : VXForm_1<1092, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000514 "vandc $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000515 [(set v4i32:$vD, (and v4i32:$vA,
516 (vnot_ppc v4i32:$vB)))]>;
Chris Lattnerb3617be2006-03-25 22:16:05 +0000517
Ulrich Weigand136ac222013-04-26 16:53:15 +0000518def VCFSX : VXForm_1<842, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000519 "vcfsx $vD, $vB, $UIMM", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000520 [(set v4f32:$vD,
521 (int_ppc_altivec_vcfsx v4i32:$vB, imm:$UIMM))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000522def VCFUX : VXForm_1<778, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000523 "vcfux $vD, $vB, $UIMM", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000524 [(set v4f32:$vD,
525 (int_ppc_altivec_vcfux v4i32:$vB, imm:$UIMM))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000526def VCTSXS : VXForm_1<970, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000527 "vctsxs $vD, $vB, $UIMM", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000528 [(set v4i32:$vD,
529 (int_ppc_altivec_vctsxs v4f32:$vB, imm:$UIMM))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000530def VCTUXS : VXForm_1<906, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000531 "vctuxs $vD, $vB, $UIMM", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000532 [(set v4i32:$vD,
533 (int_ppc_altivec_vctuxs v4f32:$vB, imm:$UIMM))]>;
Adhemerval Zanella5c6e0842012-10-08 17:27:24 +0000534
535// Defines with the UIM field set to 0 for floating-point
536// to integer (fp_to_sint/fp_to_uint) conversions and integer
537// to floating-point (sint_to_fp/uint_to_fp) conversions.
Ulrich Weigand9d2e2022013-07-03 12:51:09 +0000538let isCodeGenOnly = 1, VA = 0 in {
Ulrich Weigand136ac222013-04-26 16:53:15 +0000539def VCFSX_0 : VXForm_1<842, (outs vrrc:$vD), (ins vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000540 "vcfsx $vD, $vB, 0", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000541 [(set v4f32:$vD,
542 (int_ppc_altivec_vcfsx v4i32:$vB, 0))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000543def VCTUXS_0 : VXForm_1<906, (outs vrrc:$vD), (ins vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000544 "vctuxs $vD, $vB, 0", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000545 [(set v4i32:$vD,
546 (int_ppc_altivec_vctuxs v4f32:$vB, 0))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000547def VCFUX_0 : VXForm_1<778, (outs vrrc:$vD), (ins vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000548 "vcfux $vD, $vB, 0", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000549 [(set v4f32:$vD,
550 (int_ppc_altivec_vcfux v4i32:$vB, 0))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000551def VCTSXS_0 : VXForm_1<970, (outs vrrc:$vD), (ins vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000552 "vctsxs $vD, $vB, 0", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000553 [(set v4i32:$vD,
554 (int_ppc_altivec_vctsxs v4f32:$vB, 0))]>;
Adhemerval Zanella5c6e0842012-10-08 17:27:24 +0000555}
Bill Schmidt74b2e722013-03-28 19:27:24 +0000556def VEXPTEFP : VX2_Int_SP<394, "vexptefp", int_ppc_altivec_vexptefp>;
557def VLOGEFP : VX2_Int_SP<458, "vlogefp", int_ppc_altivec_vlogefp>;
Chris Lattnerff77dc02006-03-31 22:41:56 +0000558
Hal Finkele01d3212014-03-24 15:07:28 +0000559let isCommutable = 1 in {
Bill Schmidt74b2e722013-03-28 19:27:24 +0000560def VAVGSB : VX1_Int_Ty<1282, "vavgsb", int_ppc_altivec_vavgsb, v16i8>;
561def VAVGSH : VX1_Int_Ty<1346, "vavgsh", int_ppc_altivec_vavgsh, v8i16>;
562def VAVGSW : VX1_Int_Ty<1410, "vavgsw", int_ppc_altivec_vavgsw, v4i32>;
563def VAVGUB : VX1_Int_Ty<1026, "vavgub", int_ppc_altivec_vavgub, v16i8>;
564def VAVGUH : VX1_Int_Ty<1090, "vavguh", int_ppc_altivec_vavguh, v8i16>;
565def VAVGUW : VX1_Int_Ty<1154, "vavguw", int_ppc_altivec_vavguw, v4i32>;
Chris Lattner96338b62006-04-04 23:14:00 +0000566
Bill Schmidt74b2e722013-03-28 19:27:24 +0000567def VMAXFP : VX1_Int_Ty<1034, "vmaxfp", int_ppc_altivec_vmaxfp, v4f32>;
568def VMAXSB : VX1_Int_Ty< 258, "vmaxsb", int_ppc_altivec_vmaxsb, v16i8>;
569def VMAXSH : VX1_Int_Ty< 322, "vmaxsh", int_ppc_altivec_vmaxsh, v8i16>;
570def VMAXSW : VX1_Int_Ty< 386, "vmaxsw", int_ppc_altivec_vmaxsw, v4i32>;
571def VMAXUB : VX1_Int_Ty< 2, "vmaxub", int_ppc_altivec_vmaxub, v16i8>;
572def VMAXUH : VX1_Int_Ty< 66, "vmaxuh", int_ppc_altivec_vmaxuh, v8i16>;
573def VMAXUW : VX1_Int_Ty< 130, "vmaxuw", int_ppc_altivec_vmaxuw, v4i32>;
574def VMINFP : VX1_Int_Ty<1098, "vminfp", int_ppc_altivec_vminfp, v4f32>;
575def VMINSB : VX1_Int_Ty< 770, "vminsb", int_ppc_altivec_vminsb, v16i8>;
576def VMINSH : VX1_Int_Ty< 834, "vminsh", int_ppc_altivec_vminsh, v8i16>;
577def VMINSW : VX1_Int_Ty< 898, "vminsw", int_ppc_altivec_vminsw, v4i32>;
578def VMINUB : VX1_Int_Ty< 514, "vminub", int_ppc_altivec_vminub, v16i8>;
579def VMINUH : VX1_Int_Ty< 578, "vminuh", int_ppc_altivec_vminuh, v8i16>;
580def VMINUW : VX1_Int_Ty< 642, "vminuw", int_ppc_altivec_vminuw, v4i32>;
Hal Finkele01d3212014-03-24 15:07:28 +0000581} // isCommutable
Chris Lattner551d3a12006-03-30 23:07:36 +0000582
Ulrich Weigand136ac222013-04-26 16:53:15 +0000583def VMRGHB : VXForm_1< 12, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000584 "vmrghb $vD, $vA, $vB", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000585 [(set v16i8:$vD, (vmrghb_shuffle v16i8:$vA, v16i8:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000586def VMRGHH : VXForm_1< 76, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000587 "vmrghh $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000588 [(set v16i8:$vD, (vmrghh_shuffle v16i8:$vA, v16i8:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000589def VMRGHW : VXForm_1<140, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000590 "vmrghw $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000591 [(set v16i8:$vD, (vmrghw_shuffle v16i8:$vA, v16i8:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000592def VMRGLB : VXForm_1<268, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000593 "vmrglb $vD, $vA, $vB", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000594 [(set v16i8:$vD, (vmrglb_shuffle v16i8:$vA, v16i8:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000595def VMRGLH : VXForm_1<332, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000596 "vmrglh $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000597 [(set v16i8:$vD, (vmrglh_shuffle v16i8:$vA, v16i8:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000598def VMRGLW : VXForm_1<396, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000599 "vmrglw $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000600 [(set v16i8:$vD, (vmrglw_shuffle v16i8:$vA, v16i8:$vB))]>;
Chris Lattnera23158f2006-03-30 23:21:27 +0000601
Bill Schmidt74b2e722013-03-28 19:27:24 +0000602def VMSUMMBM : VA1a_Int_Ty3<37, "vmsummbm", int_ppc_altivec_vmsummbm,
603 v4i32, v16i8, v4i32>;
604def VMSUMSHM : VA1a_Int_Ty3<40, "vmsumshm", int_ppc_altivec_vmsumshm,
605 v4i32, v8i16, v4i32>;
606def VMSUMSHS : VA1a_Int_Ty3<41, "vmsumshs", int_ppc_altivec_vmsumshs,
607 v4i32, v8i16, v4i32>;
608def VMSUMUBM : VA1a_Int_Ty3<36, "vmsumubm", int_ppc_altivec_vmsumubm,
609 v4i32, v16i8, v4i32>;
610def VMSUMUHM : VA1a_Int_Ty3<38, "vmsumuhm", int_ppc_altivec_vmsumuhm,
611 v4i32, v8i16, v4i32>;
612def VMSUMUHS : VA1a_Int_Ty3<39, "vmsumuhs", int_ppc_altivec_vmsumuhs,
613 v4i32, v8i16, v4i32>;
Chris Lattnerc4e3ead2006-03-30 23:39:06 +0000614
Hal Finkele01d3212014-03-24 15:07:28 +0000615let isCommutable = 1 in {
Bill Schmidt74b2e722013-03-28 19:27:24 +0000616def VMULESB : VX1_Int_Ty2<776, "vmulesb", int_ppc_altivec_vmulesb,
617 v8i16, v16i8>;
618def VMULESH : VX1_Int_Ty2<840, "vmulesh", int_ppc_altivec_vmulesh,
619 v4i32, v8i16>;
620def VMULEUB : VX1_Int_Ty2<520, "vmuleub", int_ppc_altivec_vmuleub,
621 v8i16, v16i8>;
622def VMULEUH : VX1_Int_Ty2<584, "vmuleuh", int_ppc_altivec_vmuleuh,
623 v4i32, v8i16>;
624def VMULOSB : VX1_Int_Ty2<264, "vmulosb", int_ppc_altivec_vmulosb,
625 v8i16, v16i8>;
626def VMULOSH : VX1_Int_Ty2<328, "vmulosh", int_ppc_altivec_vmulosh,
627 v4i32, v8i16>;
628def VMULOUB : VX1_Int_Ty2< 8, "vmuloub", int_ppc_altivec_vmuloub,
629 v8i16, v16i8>;
630def VMULOUH : VX1_Int_Ty2< 72, "vmulouh", int_ppc_altivec_vmulouh,
631 v4i32, v8i16>;
Hal Finkele01d3212014-03-24 15:07:28 +0000632} // isCommutable
Chris Lattner551d3a12006-03-30 23:07:36 +0000633
Bill Schmidt74b2e722013-03-28 19:27:24 +0000634def VREFP : VX2_Int_SP<266, "vrefp", int_ppc_altivec_vrefp>;
635def VRFIM : VX2_Int_SP<714, "vrfim", int_ppc_altivec_vrfim>;
636def VRFIN : VX2_Int_SP<522, "vrfin", int_ppc_altivec_vrfin>;
637def VRFIP : VX2_Int_SP<650, "vrfip", int_ppc_altivec_vrfip>;
638def VRFIZ : VX2_Int_SP<586, "vrfiz", int_ppc_altivec_vrfiz>;
639def VRSQRTEFP : VX2_Int_SP<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>;
Chris Lattnera23158f2006-03-30 23:21:27 +0000640
Ulrich Weigand551b0852013-04-26 15:39:57 +0000641def VSUBCUW : VX1_Int_Ty<1408, "vsubcuw", int_ppc_altivec_vsubcuw, v4i32>;
Chris Lattnera23158f2006-03-30 23:21:27 +0000642
Ulrich Weigand136ac222013-04-26 16:53:15 +0000643def VSUBFP : VXForm_1<74, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000644 "vsubfp $vD, $vA, $vB", IIC_VecGeneral,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000645 [(set v4f32:$vD, (fsub v4f32:$vA, v4f32:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000646def VSUBUBM : VXForm_1<1024, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000647 "vsububm $vD, $vA, $vB", IIC_VecGeneral,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000648 [(set v16i8:$vD, (sub v16i8:$vA, v16i8:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000649def VSUBUHM : VXForm_1<1088, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000650 "vsubuhm $vD, $vA, $vB", IIC_VecGeneral,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000651 [(set v8i16:$vD, (sub v8i16:$vA, v8i16:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000652def VSUBUWM : VXForm_1<1152, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000653 "vsubuwm $vD, $vA, $vB", IIC_VecGeneral,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000654 [(set v4i32:$vD, (sub v4i32:$vA, v4i32:$vB))]>;
Chris Lattnerc6c88b22006-03-26 02:39:02 +0000655
Bill Schmidt74b2e722013-03-28 19:27:24 +0000656def VSUBSBS : VX1_Int_Ty<1792, "vsubsbs" , int_ppc_altivec_vsubsbs, v16i8>;
657def VSUBSHS : VX1_Int_Ty<1856, "vsubshs" , int_ppc_altivec_vsubshs, v8i16>;
658def VSUBSWS : VX1_Int_Ty<1920, "vsubsws" , int_ppc_altivec_vsubsws, v4i32>;
659def VSUBUBS : VX1_Int_Ty<1536, "vsububs" , int_ppc_altivec_vsububs, v16i8>;
660def VSUBUHS : VX1_Int_Ty<1600, "vsubuhs" , int_ppc_altivec_vsubuhs, v8i16>;
661def VSUBUWS : VX1_Int_Ty<1664, "vsubuws" , int_ppc_altivec_vsubuws, v4i32>;
662
663def VSUMSWS : VX1_Int_Ty<1928, "vsumsws" , int_ppc_altivec_vsumsws, v4i32>;
664def VSUM2SWS: VX1_Int_Ty<1672, "vsum2sws", int_ppc_altivec_vsum2sws, v4i32>;
665
Ulrich Weigand551b0852013-04-26 15:39:57 +0000666def VSUM4SBS: VX1_Int_Ty3<1800, "vsum4sbs", int_ppc_altivec_vsum4sbs,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000667 v4i32, v16i8, v4i32>;
668def VSUM4SHS: VX1_Int_Ty3<1608, "vsum4shs", int_ppc_altivec_vsum4shs,
669 v4i32, v8i16, v4i32>;
670def VSUM4UBS: VX1_Int_Ty3<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs,
671 v4i32, v16i8, v4i32>;
Chris Lattner3710fca2006-03-28 02:29:37 +0000672
Ulrich Weigand136ac222013-04-26 16:53:15 +0000673def VNOR : VXForm_1<1284, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000674 "vnor $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000675 [(set v4i32:$vD, (vnot_ppc (or v4i32:$vA,
676 v4i32:$vB)))]>;
Hal Finkele01d3212014-03-24 15:07:28 +0000677let isCommutable = 1 in {
Ulrich Weigand136ac222013-04-26 16:53:15 +0000678def VOR : VXForm_1<1156, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000679 "vor $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000680 [(set v4i32:$vD, (or v4i32:$vA, v4i32:$vB))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000681def VXOR : VXForm_1<1220, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000682 "vxor $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000683 [(set v4i32:$vD, (xor v4i32:$vA, v4i32:$vB))]>;
Hal Finkele01d3212014-03-24 15:07:28 +0000684} // isCommutable
Chris Lattner2a85fa12006-03-25 07:51:43 +0000685
Bill Schmidt74b2e722013-03-28 19:27:24 +0000686def VRLB : VX1_Int_Ty< 4, "vrlb", int_ppc_altivec_vrlb, v16i8>;
687def VRLH : VX1_Int_Ty< 68, "vrlh", int_ppc_altivec_vrlh, v8i16>;
688def VRLW : VX1_Int_Ty< 132, "vrlw", int_ppc_altivec_vrlw, v4i32>;
Chris Lattner2f8e2b22006-04-05 01:16:22 +0000689
Bill Schmidt74b2e722013-03-28 19:27:24 +0000690def VSL : VX1_Int_Ty< 452, "vsl" , int_ppc_altivec_vsl, v4i32 >;
691def VSLO : VX1_Int_Ty<1036, "vslo", int_ppc_altivec_vslo, v4i32>;
692
693def VSLB : VX1_Int_Ty< 260, "vslb", int_ppc_altivec_vslb, v16i8>;
694def VSLH : VX1_Int_Ty< 324, "vslh", int_ppc_altivec_vslh, v8i16>;
695def VSLW : VX1_Int_Ty< 388, "vslw", int_ppc_altivec_vslw, v4i32>;
Chris Lattner3710fca2006-03-28 02:29:37 +0000696
Ulrich Weigand136ac222013-04-26 16:53:15 +0000697def VSPLTB : VXForm_1<524, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000698 "vspltb $vD, $vB, $UIMM", IIC_VecPerm,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000699 [(set v16i8:$vD,
700 (vspltb_shuffle:$UIMM v16i8:$vB, (undef)))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000701def VSPLTH : VXForm_1<588, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000702 "vsplth $vD, $vB, $UIMM", IIC_VecPerm,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000703 [(set v16i8:$vD,
704 (vsplth_shuffle:$UIMM v16i8:$vB, (undef)))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000705def VSPLTW : VXForm_1<652, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000706 "vspltw $vD, $vB, $UIMM", IIC_VecPerm,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000707 [(set v16i8:$vD,
708 (vspltw_shuffle:$UIMM v16i8:$vB, (undef)))]>;
Nemanja Ivanovic11049f82016-10-04 06:59:23 +0000709let isCodeGenOnly = 1 in {
710 def VSPLTBs : VXForm_1<524, (outs vrrc:$vD), (ins u5imm:$UIMM, vfrc:$vB),
711 "vspltb $vD, $vB, $UIMM", IIC_VecPerm, []>;
712 def VSPLTHs : VXForm_1<588, (outs vrrc:$vD), (ins u5imm:$UIMM, vfrc:$vB),
713 "vsplth $vD, $vB, $UIMM", IIC_VecPerm, []>;
714}
Chris Lattner2a85fa12006-03-25 07:51:43 +0000715
Bill Schmidt74b2e722013-03-28 19:27:24 +0000716def VSR : VX1_Int_Ty< 708, "vsr" , int_ppc_altivec_vsr, v4i32>;
717def VSRO : VX1_Int_Ty<1100, "vsro" , int_ppc_altivec_vsro, v4i32>;
718
719def VSRAB : VX1_Int_Ty< 772, "vsrab", int_ppc_altivec_vsrab, v16i8>;
720def VSRAH : VX1_Int_Ty< 836, "vsrah", int_ppc_altivec_vsrah, v8i16>;
721def VSRAW : VX1_Int_Ty< 900, "vsraw", int_ppc_altivec_vsraw, v4i32>;
722def VSRB : VX1_Int_Ty< 516, "vsrb" , int_ppc_altivec_vsrb , v16i8>;
723def VSRH : VX1_Int_Ty< 580, "vsrh" , int_ppc_altivec_vsrh , v8i16>;
724def VSRW : VX1_Int_Ty< 644, "vsrw" , int_ppc_altivec_vsrw , v4i32>;
Chris Lattner3710fca2006-03-28 02:29:37 +0000725
726
Ulrich Weigand136ac222013-04-26 16:53:15 +0000727def VSPLTISB : VXForm_3<780, (outs vrrc:$vD), (ins s5imm:$SIMM),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000728 "vspltisb $vD, $SIMM", IIC_VecPerm,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000729 [(set v16i8:$vD, (v16i8 vecspltisb:$SIMM))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000730def VSPLTISH : VXForm_3<844, (outs vrrc:$vD), (ins s5imm:$SIMM),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000731 "vspltish $vD, $SIMM", IIC_VecPerm,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000732 [(set v8i16:$vD, (v8i16 vecspltish:$SIMM))]>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000733def VSPLTISW : VXForm_3<908, (outs vrrc:$vD), (ins s5imm:$SIMM),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000734 "vspltisw $vD, $SIMM", IIC_VecPerm,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000735 [(set v4i32:$vD, (v4i32 vecspltisw:$SIMM))]>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000736
Chris Lattner551d3a12006-03-30 23:07:36 +0000737// Vector Pack.
Bill Schmidt74b2e722013-03-28 19:27:24 +0000738def VPKPX : VX1_Int_Ty2<782, "vpkpx", int_ppc_altivec_vpkpx,
739 v8i16, v4i32>;
740def VPKSHSS : VX1_Int_Ty2<398, "vpkshss", int_ppc_altivec_vpkshss,
741 v16i8, v8i16>;
742def VPKSHUS : VX1_Int_Ty2<270, "vpkshus", int_ppc_altivec_vpkshus,
743 v16i8, v8i16>;
744def VPKSWSS : VX1_Int_Ty2<462, "vpkswss", int_ppc_altivec_vpkswss,
Nemanja Ivanovicd389c7a2016-02-05 14:50:29 +0000745 v8i16, v4i32>;
Bill Schmidt74b2e722013-03-28 19:27:24 +0000746def VPKSWUS : VX1_Int_Ty2<334, "vpkswus", int_ppc_altivec_vpkswus,
747 v8i16, v4i32>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000748def VPKUHUM : VXForm_1<14, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000749 "vpkuhum $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000750 [(set v16i8:$vD,
751 (vpkuhum_shuffle v16i8:$vA, v16i8:$vB))]>;
Bill Schmidt74b2e722013-03-28 19:27:24 +0000752def VPKUHUS : VX1_Int_Ty2<142, "vpkuhus", int_ppc_altivec_vpkuhus,
753 v16i8, v8i16>;
Ulrich Weigand136ac222013-04-26 16:53:15 +0000754def VPKUWUM : VXForm_1<78, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000755 "vpkuwum $vD, $vA, $vB", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000756 [(set v16i8:$vD,
757 (vpkuwum_shuffle v16i8:$vA, v16i8:$vB))]>;
Bill Schmidt74b2e722013-03-28 19:27:24 +0000758def VPKUWUS : VX1_Int_Ty2<206, "vpkuwus", int_ppc_altivec_vpkuwus,
759 v8i16, v4i32>;
Chris Lattner551d3a12006-03-30 23:07:36 +0000760
761// Vector Unpack.
Bill Schmidt74b2e722013-03-28 19:27:24 +0000762def VUPKHPX : VX2_Int_Ty2<846, "vupkhpx", int_ppc_altivec_vupkhpx,
763 v4i32, v8i16>;
764def VUPKHSB : VX2_Int_Ty2<526, "vupkhsb", int_ppc_altivec_vupkhsb,
765 v8i16, v16i8>;
766def VUPKHSH : VX2_Int_Ty2<590, "vupkhsh", int_ppc_altivec_vupkhsh,
767 v4i32, v8i16>;
768def VUPKLPX : VX2_Int_Ty2<974, "vupklpx", int_ppc_altivec_vupklpx,
769 v4i32, v8i16>;
770def VUPKLSB : VX2_Int_Ty2<654, "vupklsb", int_ppc_altivec_vupklsb,
771 v8i16, v16i8>;
772def VUPKLSH : VX2_Int_Ty2<718, "vupklsh", int_ppc_altivec_vupklsh,
773 v4i32, v8i16>;
Chris Lattner551d3a12006-03-30 23:07:36 +0000774
Chris Lattner2a85fa12006-03-25 07:51:43 +0000775
Chris Lattner793cbcb2006-03-26 04:57:17 +0000776// Altivec Comparisons.
777
Chris Lattner45c70932006-03-31 05:32:57 +0000778class VCMP<bits<10> xo, string asmstr, ValueType Ty>
Hal Finkel3e5a3602013-11-27 23:26:09 +0000779 : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
780 IIC_VecFPCompare,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000781 [(set Ty:$vD, (Ty (PPCvcmp Ty:$vA, Ty:$vB, xo)))]>;
Chris Lattner45c70932006-03-31 05:32:57 +0000782class VCMPo<bits<10> xo, string asmstr, ValueType Ty>
Hal Finkel3e5a3602013-11-27 23:26:09 +0000783 : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
784 IIC_VecFPCompare,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000785 [(set Ty:$vD, (Ty (PPCvcmp_o Ty:$vA, Ty:$vB, xo)))]> {
Chris Lattner95c7adc2006-04-04 17:25:31 +0000786 let Defs = [CR6];
787 let RC = 1;
788}
Chris Lattner45c70932006-03-31 05:32:57 +0000789
790// f32 element comparisons.0
791def VCMPBFP : VCMP <966, "vcmpbfp $vD, $vA, $vB" , v4f32>;
792def VCMPBFPo : VCMPo<966, "vcmpbfp. $vD, $vA, $vB" , v4f32>;
793def VCMPEQFP : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
794def VCMPEQFPo : VCMPo<198, "vcmpeqfp. $vD, $vA, $vB", v4f32>;
795def VCMPGEFP : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
796def VCMPGEFPo : VCMPo<454, "vcmpgefp. $vD, $vA, $vB", v4f32>;
797def VCMPGTFP : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
798def VCMPGTFPo : VCMPo<710, "vcmpgtfp. $vD, $vA, $vB", v4f32>;
Chris Lattner793cbcb2006-03-26 04:57:17 +0000799
800// i8 element comparisons.
Chris Lattner45c70932006-03-31 05:32:57 +0000801def VCMPEQUB : VCMP < 6, "vcmpequb $vD, $vA, $vB" , v16i8>;
802def VCMPEQUBo : VCMPo< 6, "vcmpequb. $vD, $vA, $vB", v16i8>;
803def VCMPGTSB : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
804def VCMPGTSBo : VCMPo<774, "vcmpgtsb. $vD, $vA, $vB", v16i8>;
805def VCMPGTUB : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
806def VCMPGTUBo : VCMPo<518, "vcmpgtub. $vD, $vA, $vB", v16i8>;
Chris Lattner793cbcb2006-03-26 04:57:17 +0000807
808// i16 element comparisons.
Chris Lattner45c70932006-03-31 05:32:57 +0000809def VCMPEQUH : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
810def VCMPEQUHo : VCMPo< 70, "vcmpequh. $vD, $vA, $vB", v8i16>;
811def VCMPGTSH : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
812def VCMPGTSHo : VCMPo<838, "vcmpgtsh. $vD, $vA, $vB", v8i16>;
813def VCMPGTUH : VCMP <582, "vcmpgtuh $vD, $vA, $vB" , v8i16>;
814def VCMPGTUHo : VCMPo<582, "vcmpgtuh. $vD, $vA, $vB", v8i16>;
Chris Lattner793cbcb2006-03-26 04:57:17 +0000815
816// i32 element comparisons.
Chris Lattner45c70932006-03-31 05:32:57 +0000817def VCMPEQUW : VCMP <134, "vcmpequw $vD, $vA, $vB" , v4i32>;
818def VCMPEQUWo : VCMPo<134, "vcmpequw. $vD, $vA, $vB", v4i32>;
819def VCMPGTSW : VCMP <902, "vcmpgtsw $vD, $vA, $vB" , v4i32>;
820def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
821def VCMPGTUW : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>;
822def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
Kit Barton0cfa7b72015-03-03 19:55:45 +0000823
Ulrich Weigand9d2e2022013-07-03 12:51:09 +0000824let isCodeGenOnly = 1 in {
Hal Finkel47150812013-07-11 17:43:32 +0000825def V_SET0B : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000826 "vxor $vD, $vD, $vD", IIC_VecFP,
Hal Finkel47150812013-07-11 17:43:32 +0000827 [(set v16i8:$vD, (v16i8 immAllZerosV))]>;
828def V_SET0H : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000829 "vxor $vD, $vD, $vD", IIC_VecFP,
Hal Finkel47150812013-07-11 17:43:32 +0000830 [(set v8i16:$vD, (v8i16 immAllZerosV))]>;
831def V_SET0 : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000832 "vxor $vD, $vD, $vD", IIC_VecFP,
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000833 [(set v4i32:$vD, (v4i32 immAllZerosV))]>;
Hal Finkel47150812013-07-11 17:43:32 +0000834
Adhemerval Zanella812410f2012-11-30 13:05:44 +0000835let IMM=-1 in {
Hal Finkel47150812013-07-11 17:43:32 +0000836def V_SETALLONESB : VXForm_3<908, (outs vrrc:$vD), (ins),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000837 "vspltisw $vD, -1", IIC_VecFP,
Hal Finkel47150812013-07-11 17:43:32 +0000838 [(set v16i8:$vD, (v16i8 immAllOnesV))]>;
839def V_SETALLONESH : VXForm_3<908, (outs vrrc:$vD), (ins),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000840 "vspltisw $vD, -1", IIC_VecFP,
Hal Finkel47150812013-07-11 17:43:32 +0000841 [(set v8i16:$vD, (v8i16 immAllOnesV))]>;
842def V_SETALLONES : VXForm_3<908, (outs vrrc:$vD), (ins),
Hal Finkel3e5a3602013-11-27 23:26:09 +0000843 "vspltisw $vD, -1", IIC_VecFP,
Bill Schmidt74b2e722013-03-28 19:27:24 +0000844 [(set v4i32:$vD, (v4i32 immAllOnesV))]>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000845}
Ulrich Weigand9d2e2022013-07-03 12:51:09 +0000846}
Adhemerval Zanella812410f2012-11-30 13:05:44 +0000847} // VALU Operations.
Chris Lattner2a85fa12006-03-25 07:51:43 +0000848
849//===----------------------------------------------------------------------===//
850// Additional Altivec Patterns
851//
852
Chris Lattner2a85fa12006-03-25 07:51:43 +0000853// Loads.
Chris Lattner868a75b2006-06-20 00:39:56 +0000854def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000855
856// Stores.
Bill Schmidt74b2e722013-03-28 19:27:24 +0000857def : Pat<(store v4i32:$rS, xoaddr:$dst),
858 (STVX $rS, xoaddr:$dst)>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000859
860// Bit conversions.
861def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
862def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
863def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;
Bill Schmidtfe88b182015-02-03 21:58:23 +0000864def : Pat<(v16i8 (bitconvert (v2i64 VRRC:$src))), (v16i8 VRRC:$src)>;
Kit Bartond4eb73c2015-05-05 16:10:44 +0000865def : Pat<(v16i8 (bitconvert (v1i128 VRRC:$src))), (v16i8 VRRC:$src)>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000866
867def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
868def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
869def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;
Bill Schmidtfe88b182015-02-03 21:58:23 +0000870def : Pat<(v8i16 (bitconvert (v2i64 VRRC:$src))), (v8i16 VRRC:$src)>;
Kit Bartond4eb73c2015-05-05 16:10:44 +0000871def : Pat<(v8i16 (bitconvert (v1i128 VRRC:$src))), (v8i16 VRRC:$src)>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000872
873def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
874def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
875def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;
Bill Schmidtfe88b182015-02-03 21:58:23 +0000876def : Pat<(v4i32 (bitconvert (v2i64 VRRC:$src))), (v4i32 VRRC:$src)>;
Kit Bartond4eb73c2015-05-05 16:10:44 +0000877def : Pat<(v4i32 (bitconvert (v1i128 VRRC:$src))), (v4i32 VRRC:$src)>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000878
879def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
880def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
881def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
Bill Schmidtfe88b182015-02-03 21:58:23 +0000882def : Pat<(v4f32 (bitconvert (v2i64 VRRC:$src))), (v4f32 VRRC:$src)>;
Kit Bartond4eb73c2015-05-05 16:10:44 +0000883def : Pat<(v4f32 (bitconvert (v1i128 VRRC:$src))), (v4f32 VRRC:$src)>;
Bill Schmidtfe88b182015-02-03 21:58:23 +0000884
885def : Pat<(v2i64 (bitconvert (v16i8 VRRC:$src))), (v2i64 VRRC:$src)>;
886def : Pat<(v2i64 (bitconvert (v8i16 VRRC:$src))), (v2i64 VRRC:$src)>;
887def : Pat<(v2i64 (bitconvert (v4i32 VRRC:$src))), (v2i64 VRRC:$src)>;
888def : Pat<(v2i64 (bitconvert (v4f32 VRRC:$src))), (v2i64 VRRC:$src)>;
Kit Bartond4eb73c2015-05-05 16:10:44 +0000889def : Pat<(v2i64 (bitconvert (v1i128 VRRC:$src))), (v2i64 VRRC:$src)>;
890
891def : Pat<(v1i128 (bitconvert (v16i8 VRRC:$src))), (v1i128 VRRC:$src)>;
892def : Pat<(v1i128 (bitconvert (v8i16 VRRC:$src))), (v1i128 VRRC:$src)>;
893def : Pat<(v1i128 (bitconvert (v4i32 VRRC:$src))), (v1i128 VRRC:$src)>;
894def : Pat<(v1i128 (bitconvert (v4f32 VRRC:$src))), (v1i128 VRRC:$src)>;
895def : Pat<(v1i128 (bitconvert (v2i64 VRRC:$src))), (v1i128 VRRC:$src)>;
Chris Lattner2a85fa12006-03-25 07:51:43 +0000896
Chris Lattner1d338192006-04-06 18:26:28 +0000897// Shuffles.
898
Chris Lattnera4bbfae2006-04-06 22:28:36 +0000899// Match vsldoi(x,x), vpkuwum(x,x), vpkuhum(x,x)
Bill Schmidt74b2e722013-03-28 19:27:24 +0000900def:Pat<(vsldoi_unary_shuffle:$in v16i8:$vA, undef),
Ulrich Weigand084ff8e2013-04-03 14:08:13 +0000901 (VSLDOI $vA, $vA, (VSLDOI_unary_get_imm $in))>;
Bill Schmidt74b2e722013-03-28 19:27:24 +0000902def:Pat<(vpkuwum_unary_shuffle v16i8:$vA, undef),
903 (VPKUWUM $vA, $vA)>;
904def:Pat<(vpkuhum_unary_shuffle v16i8:$vA, undef),
905 (VPKUHUM $vA, $vA)>;
Chris Lattner1d338192006-04-06 18:26:28 +0000906
Bill Schmidt42a69362014-08-05 20:47:25 +0000907// Match vsldoi(y,x), vpkuwum(y,x), vpkuhum(y,x), i.e., swapped operands.
908// These fragments are matched for little-endian, where the inputs must
909// be swapped for correct semantics.
910def:Pat<(vsldoi_swapped_shuffle:$in v16i8:$vA, v16i8:$vB),
911 (VSLDOI $vB, $vA, (VSLDOI_swapped_get_imm $in))>;
Ulrich Weigandcc9909b2014-08-04 13:53:40 +0000912def:Pat<(vpkuwum_swapped_shuffle v16i8:$vA, v16i8:$vB),
913 (VPKUWUM $vB, $vA)>;
914def:Pat<(vpkuhum_swapped_shuffle v16i8:$vA, v16i8:$vB),
915 (VPKUHUM $vB, $vA)>;
916
Chris Lattnerf38e0332006-04-06 22:02:42 +0000917// Match vmrg*(x,x)
Bill Schmidt74b2e722013-03-28 19:27:24 +0000918def:Pat<(vmrglb_unary_shuffle v16i8:$vA, undef),
919 (VMRGLB $vA, $vA)>;
920def:Pat<(vmrglh_unary_shuffle v16i8:$vA, undef),
921 (VMRGLH $vA, $vA)>;
922def:Pat<(vmrglw_unary_shuffle v16i8:$vA, undef),
923 (VMRGLW $vA, $vA)>;
924def:Pat<(vmrghb_unary_shuffle v16i8:$vA, undef),
925 (VMRGHB $vA, $vA)>;
926def:Pat<(vmrghh_unary_shuffle v16i8:$vA, undef),
927 (VMRGHH $vA, $vA)>;
928def:Pat<(vmrghw_unary_shuffle v16i8:$vA, undef),
929 (VMRGHW $vA, $vA)>;
Chris Lattnerf38e0332006-04-06 22:02:42 +0000930
// Match vmrg*(y,x), i.e., swapped operands.  These fragments
// are matched for little-endian, where the inputs must be
// swapped for correct semantics.
def:Pat<(vmrglb_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGLB $vB, $vA)>;
def:Pat<(vmrglh_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGLH $vB, $vA)>;
def:Pat<(vmrglw_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGLW $vB, $vA)>;
def:Pat<(vmrghb_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGHB $vB, $vA)>;
def:Pat<(vmrghh_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGHH $vB, $vA)>;
def:Pat<(vmrghw_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGHW $vB, $vA)>;

// Logical Operations: NOT is VNOR of a value with itself; NOR and ANDC
// patterns fold the complement into a single instruction.
def : Pat<(vnot_ppc v4i32:$vA), (VNOR $vA, $vA)>;

def : Pat<(vnot_ppc (or v4i32:$A, v4i32:$B)),
          (VNOR $A, $B)>;
def : Pat<(and v4i32:$A, (vnot_ppc v4i32:$B)),
          (VANDC $A, $B)>;

// AltiVec has no plain fp multiply, so select fmul as VMADDFP with a zero
// addend (VSLW of all-ones by all-ones yields 0x80000000, i.e. -0.0f, in
// each word per the ISA shift-amount-mod-32 rule).
def : Pat<(fmul v4f32:$vA, v4f32:$vB),
          (VMADDFP $vA, $vB,
              (v4i32 (VSLW (V_SETALLONES), (V_SETALLONES))))>;

// Fused multiply add and multiply sub for packed float.  These are represented
// separately from the real instructions above, for operations that must have
// the additional precision, such as Newton-Raphson (used by divide, sqrt)
def : Pat<(PPCvmaddfp v4f32:$A, v4f32:$B, v4f32:$C),
          (VMADDFP $A, $B, $C)>;
def : Pat<(PPCvnmsubfp v4f32:$A, v4f32:$B, v4f32:$C),
          (VNMSUBFP $A, $B, $C)>;

// The intrinsic forms map to the same instructions.
def : Pat<(int_ppc_altivec_vmaddfp v4f32:$A, v4f32:$B, v4f32:$C),
          (VMADDFP $A, $B, $C)>;
def : Pat<(int_ppc_altivec_vnmsubfp v4f32:$A, v4f32:$B, v4f32:$C),
          (VNMSUBFP $A, $B, $C)>;

// Permute and reciprocal-estimate nodes.
def : Pat<(PPCvperm v16i8:$vA, v16i8:$vB, v16i8:$vC),
          (VPERM $vA, $vB, $vC)>;

def : Pat<(PPCfre v4f32:$A), (VREFP $A)>;
def : Pat<(PPCfrsqrte v4f32:$A), (VRSQRTEFP $A)>;

// Vector shifts: per-element shl/srl/sra for byte, halfword, and word.
def : Pat<(v16i8 (shl v16i8:$vA, v16i8:$vB)),
          (v16i8 (VSLB $vA, $vB))>;
def : Pat<(v8i16 (shl v8i16:$vA, v8i16:$vB)),
          (v8i16 (VSLH $vA, $vB))>;
def : Pat<(v4i32 (shl v4i32:$vA, v4i32:$vB)),
          (v4i32 (VSLW $vA, $vB))>;

def : Pat<(v16i8 (srl v16i8:$vA, v16i8:$vB)),
          (v16i8 (VSRB $vA, $vB))>;
def : Pat<(v8i16 (srl v8i16:$vA, v8i16:$vB)),
          (v8i16 (VSRH $vA, $vB))>;
def : Pat<(v4i32 (srl v4i32:$vA, v4i32:$vB)),
          (v4i32 (VSRW $vA, $vB))>;

def : Pat<(v16i8 (sra v16i8:$vA, v16i8:$vB)),
          (v16i8 (VSRAB $vA, $vB))>;
def : Pat<(v8i16 (sra v8i16:$vA, v8i16:$vB)),
          (v8i16 (VSRAH $vA, $vB))>;
def : Pat<(v4i32 (sra v4i32:$vA, v4i32:$vB)),
          (v4i32 (VSRAW $vA, $vB))>;

// Float to integer and integer to float conversions (scale immediate of 0).
def : Pat<(v4i32 (fp_to_sint v4f32:$vA)),
           (VCTSXS_0 $vA)>;
def : Pat<(v4i32 (fp_to_uint v4f32:$vA)),
           (VCTUXS_0 $vA)>;
def : Pat<(v4f32 (sint_to_fp v4i32:$vA)),
           (VCFSX_0 $vA)>;
def : Pat<(v4f32 (uint_to_fp v4i32:$vA)),
           (VCFUX_0 $vA)>;

// Floating-point rounding: floor, ceil, trunc, and nearbyint map directly
// to the VRFI* round-to-integer instructions.
def : Pat<(v4f32 (ffloor v4f32:$vA)),
          (VRFIM $vA)>;
def : Pat<(v4f32 (fceil v4f32:$vA)),
          (VRFIP $vA)>;
def : Pat<(v4f32 (ftrunc v4f32:$vA)),
          (VRFIZ $vA)>;
def : Pat<(v4f32 (fnearbyint v4f32:$vA)),
          (VRFIN $vA)>;

1020} // end HasAltivec
1021
// Subtarget predicates gating the POWER8 vector facility and the in-core
// crypto category instructions defined below.
def HasP8Altivec : Predicate<"PPCSubTarget->hasP8Altivec()">;
def HasP8Crypto : Predicate<"PPCSubTarget->hasP8Crypto()">;
let Predicates = [HasP8Altivec] in {

// Even/odd signed/unsigned word multiplies, modulo word multiply, and
// doubleword max/min.  All of these are commutable.
let isCommutable = 1 in {
def VMULESW : VX1_Int_Ty2<904, "vmulesw", int_ppc_altivec_vmulesw,
                          v2i64, v4i32>;
def VMULEUW : VX1_Int_Ty2<648, "vmuleuw", int_ppc_altivec_vmuleuw,
                          v2i64, v4i32>;
def VMULOSW : VX1_Int_Ty2<392, "vmulosw", int_ppc_altivec_vmulosw,
                          v2i64, v4i32>;
def VMULOUW : VX1_Int_Ty2<136, "vmulouw", int_ppc_altivec_vmulouw,
                          v2i64, v4i32>;
def VMULUWM : VXForm_1<137, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vmuluwm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v4i32:$vD, (mul v4i32:$vA, v4i32:$vB))]>;
def VMAXSD : VX1_Int_Ty<450, "vmaxsd", int_ppc_altivec_vmaxsd, v2i64>;
def VMAXUD : VX1_Int_Ty<194, "vmaxud", int_ppc_altivec_vmaxud, v2i64>;
def VMINSD : VX1_Int_Ty<962, "vminsd", int_ppc_altivec_vminsd, v2i64>;
def VMINUD : VX1_Int_Ty<706, "vminud", int_ppc_altivec_vminud, v2i64>;
} // isCommutable

// Vector merge even/odd word.
def VMRGEW : VXForm_1<1932, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vmrgew $vD, $vA, $vB", IIC_VecFP,
                      [(set v16i8:$vD, (vmrgew_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGOW : VXForm_1<1676, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vmrgow $vD, $vA, $vB", IIC_VecFP,
                      [(set v16i8:$vD, (vmrgow_shuffle v16i8:$vA, v16i8:$vB))]>;

// Match vmrgew(x,x) and vmrgow(x,x)
def:Pat<(vmrgew_unary_shuffle v16i8:$vA, undef),
        (VMRGEW $vA, $vA)>;
def:Pat<(vmrgow_unary_shuffle v16i8:$vA, undef),
        (VMRGOW $vA, $vA)>;

// Match vmrgew(y,x) and vmrgow(y,x), i.e., swapped operands. These fragments
// are matched for little-endian, where the inputs must be swapped for correct
// semantics.
def:Pat<(vmrgew_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGEW $vB, $vA)>;
def:Pat<(vmrgow_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGOW $vB, $vA)>;


// Vector shifts: doubleword rotate and shl/srl/sra.
def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
def VSLD : VXForm_1<1476, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vsld $vD, $vA, $vB", IIC_VecGeneral,
                    [(set v2i64:$vD, (shl v2i64:$vA, v2i64:$vB))]>;
def VSRD : VXForm_1<1732, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vsrd $vD, $vA, $vB", IIC_VecGeneral,
                    [(set v2i64:$vD, (srl v2i64:$vA, v2i64:$vB))]>;
def VSRAD : VXForm_1<964, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "vsrad $vD, $vA, $vB", IIC_VecGeneral,
                     [(set v2i64:$vD, (sra v2i64:$vA, v2i64:$vB))]>;

// Vector Integer Arithmetic Instructions
let isCommutable = 1 in {
def VADDUDM : VXForm_1<192, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vaddudm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v2i64:$vD, (add v2i64:$vA, v2i64:$vB))]>;
def VADDUQM : VXForm_1<256, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vadduqm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v1i128:$vD, (add v1i128:$vA, v1i128:$vB))]>;
} // isCommutable

// Vector Quadword Add (extended/carry forms, intrinsic-only)
def VADDEUQM : VA1a_Int_Ty<60, "vaddeuqm", int_ppc_altivec_vaddeuqm, v1i128>;
def VADDCUQ : VX1_Int_Ty<320, "vaddcuq", int_ppc_altivec_vaddcuq, v1i128>;
def VADDECUQ : VA1a_Int_Ty<61, "vaddecuq", int_ppc_altivec_vaddecuq, v1i128>;

// Vector Doubleword Subtract
def VSUBUDM : VXForm_1<1216, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vsubudm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v2i64:$vD, (sub v2i64:$vA, v2i64:$vB))]>;

// Vector Quadword Subtract
def VSUBUQM : VXForm_1<1280, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vsubuqm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v1i128:$vD, (sub v1i128:$vA, v1i128:$vB))]>;
def VSUBEUQM : VA1a_Int_Ty<62, "vsubeuqm", int_ppc_altivec_vsubeuqm, v1i128>;
def VSUBCUQ : VX1_Int_Ty<1344, "vsubcuq", int_ppc_altivec_vsubcuq, v1i128>;
def VSUBECUQ : VA1a_Int_Ty<63, "vsubecuq", int_ppc_altivec_vsubecuq, v1i128>;

// Count Leading Zeros
def VCLZB : VXForm_2<1794, (outs vrrc:$vD), (ins vrrc:$vB),
                     "vclzb $vD, $vB", IIC_VecGeneral,
                     [(set v16i8:$vD, (ctlz v16i8:$vB))]>;
def VCLZH : VXForm_2<1858, (outs vrrc:$vD), (ins vrrc:$vB),
                     "vclzh $vD, $vB", IIC_VecGeneral,
                     [(set v8i16:$vD, (ctlz v8i16:$vB))]>;
def VCLZW : VXForm_2<1922, (outs vrrc:$vD), (ins vrrc:$vB),
                     "vclzw $vD, $vB", IIC_VecGeneral,
                     [(set v4i32:$vD, (ctlz v4i32:$vB))]>;
def VCLZD : VXForm_2<1986, (outs vrrc:$vD), (ins vrrc:$vB),
                     "vclzd $vD, $vB", IIC_VecGeneral,
                     [(set v2i64:$vD, (ctlz v2i64:$vB))]>;

// Population Count
def VPOPCNTB : VXForm_2<1795, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vpopcntb $vD, $vB", IIC_VecGeneral,
                        [(set v16i8:$vD, (ctpop v16i8:$vB))]>;
def VPOPCNTH : VXForm_2<1859, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vpopcnth $vD, $vB", IIC_VecGeneral,
                        [(set v8i16:$vD, (ctpop v8i16:$vB))]>;
def VPOPCNTW : VXForm_2<1923, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vpopcntw $vD, $vB", IIC_VecGeneral,
                        [(set v4i32:$vD, (ctpop v4i32:$vB))]>;
def VPOPCNTD : VXForm_2<1987, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vpopcntd $vD, $vB", IIC_VecGeneral,
                        [(set v2i64:$vD, (ctpop v2i64:$vB))]>;

let isCommutable = 1 in {
// FIXME: Use AddedComplexity > 400 to ensure these patterns match before the
// VSX equivalents. We need to fix this up at some point. Two possible
// solutions for this problem:
// 1. Disable Altivec patterns that compete with VSX patterns using the
//    !HasVSX predicate. This essentially favours VSX over Altivec, in
//    hopes of reducing register pressure (larger register set using VSX
//    instructions than VMX instructions)
// 2. Employ a more disciplined use of AddedComplexity, which would provide
//    more fine-grained control than option 1. This would be beneficial
//    if we find situations where Altivec is really preferred over VSX.
def VEQV : VXForm_1<1668, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "veqv $vD, $vA, $vB", IIC_VecGeneral,
                    [(set v4i32:$vD, (vnot_ppc (xor v4i32:$vA, v4i32:$vB)))]>;
def VNAND : VXForm_1<1412, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "vnand $vD, $vA, $vB", IIC_VecGeneral,
                     [(set v4i32:$vD, (vnot_ppc (and v4i32:$vA, v4i32:$vB)))]>;
} // isCommutable

// OR-with-complement is not commutable: only $vB is complemented.
def VORC : VXForm_1<1348, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vorc $vD, $vA, $vB", IIC_VecGeneral,
                    [(set v4i32:$vD, (or v4i32:$vA,
                                         (vnot_ppc v4i32:$vB)))]>;

// i64 element comparisons ("." forms also set CR6).
def VCMPEQUD  : VCMP <199, "vcmpequd $vD, $vA, $vB" , v2i64>;
def VCMPEQUDo : VCMPo<199, "vcmpequd. $vD, $vA, $vB", v2i64>;
def VCMPGTSD  : VCMP <967, "vcmpgtsd $vD, $vA, $vB" , v2i64>;
def VCMPGTSDo : VCMPo<967, "vcmpgtsd. $vD, $vA, $vB", v2i64>;
def VCMPGTUD  : VCMP <711, "vcmpgtud $vD, $vA, $vB" , v2i64>;
def VCMPGTUDo : VCMPo<711, "vcmpgtud. $vD, $vA, $vB", v2i64>;

// The cryptography instructions that do not require Category:Vector.Crypto
def VPMSUMB : VX1_Int_Ty<1032, "vpmsumb",
                         int_ppc_altivec_crypto_vpmsumb, v16i8>;
def VPMSUMH : VX1_Int_Ty<1096, "vpmsumh",
                         int_ppc_altivec_crypto_vpmsumh, v8i16>;
def VPMSUMW : VX1_Int_Ty<1160, "vpmsumw",
                         int_ppc_altivec_crypto_vpmsumw, v4i32>;
def VPMSUMD : VX1_Int_Ty<1224, "vpmsumd",
                         int_ppc_altivec_crypto_vpmsumd, v2i64>;
def VPERMXOR : VA1a_Int_Ty<45, "vpermxor",
                           int_ppc_altivec_crypto_vpermxor, v16i8>;

// Vector doubleword integer pack and unpack.
def VPKSDSS : VX1_Int_Ty2<1486, "vpksdss", int_ppc_altivec_vpksdss,
                          v4i32, v2i64>;
def VPKSDUS : VX1_Int_Ty2<1358, "vpksdus", int_ppc_altivec_vpksdus,
                          v4i32, v2i64>;
def VPKUDUM : VXForm_1<1102, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vpkudum $vD, $vA, $vB", IIC_VecFP,
                       [(set v16i8:$vD,
                          (vpkudum_shuffle v16i8:$vA, v16i8:$vB))]>;
def VPKUDUS : VX1_Int_Ty2<1230, "vpkudus", int_ppc_altivec_vpkudus,
                          v4i32, v2i64>;
def VUPKHSW : VX2_Int_Ty2<1614, "vupkhsw", int_ppc_altivec_vupkhsw,
                          v2i64, v4i32>;
def VUPKLSW : VX2_Int_Ty2<1742, "vupklsw", int_ppc_altivec_vupklsw,
                          v2i64, v4i32>;

// Shuffle patterns for unary and swapped (LE) vector pack modulo.
def:Pat<(vpkudum_unary_shuffle v16i8:$vA, undef),
        (VPKUDUM $vA, $vA)>;
def:Pat<(vpkudum_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VPKUDUM $vB, $vA)>;

// Vector gather bits by bytes / bit permute quadword.
def VGBBD : VX2_Int_Ty2<1292, "vgbbd", int_ppc_altivec_vgbbd, v16i8, v16i8>;
def VBPERMQ : VX1_Int_Ty2<1356, "vbpermq", int_ppc_altivec_vbpermq,
                          v2i64, v16i8>;
} // end HasP8Altivec

// Crypto instructions (from builtins)
let Predicates = [HasP8Crypto] in {
def VSHASIGMAW : VXCR_Int_Ty<1666, "vshasigmaw",
                             int_ppc_altivec_crypto_vshasigmaw, v4i32>;
def VSHASIGMAD : VXCR_Int_Ty<1730, "vshasigmad",
                             int_ppc_altivec_crypto_vshasigmad, v2i64>;
def VCIPHER : VX1_Int_Ty<1288, "vcipher", int_ppc_altivec_crypto_vcipher,
                         v2i64>;
def VCIPHERLAST : VX1_Int_Ty<1289, "vcipherlast",
                             int_ppc_altivec_crypto_vcipherlast, v2i64>;
def VNCIPHER : VX1_Int_Ty<1352, "vncipher",
                          int_ppc_altivec_crypto_vncipher, v2i64>;
def VNCIPHERLAST : VX1_Int_Ty<1353, "vncipherlast",
                              int_ppc_altivec_crypto_vncipherlast, v2i64>;
def VSBOX : VXBX_Int_Ty<1480, "vsbox", int_ppc_altivec_crypto_vsbox, v2i64>;
} // HasP8Crypto

// The following altivec instructions were introduced in Power ISA 3.0
def HasP9Altivec : Predicate<"PPCSubTarget->hasP9Altivec()">;
let Predicates = [HasP9Altivec] in {

// i8 element comparisons (not-equal and not-equal-or-zero; "." sets CR6).
def VCMPNEB   : VCMP <  7, "vcmpneb $vD, $vA, $vB"  , v16i8>;
def VCMPNEBo  : VCMPo<  7, "vcmpneb. $vD, $vA, $vB" , v16i8>;
def VCMPNEZB  : VCMP <263, "vcmpnezb $vD, $vA, $vB" , v16i8>;
def VCMPNEZBo : VCMPo<263, "vcmpnezb. $vD, $vA, $vB", v16i8>;

// i16 element comparisons.
def VCMPNEH   : VCMP < 71, "vcmpneh $vD, $vA, $vB"  , v8i16>;
def VCMPNEHo  : VCMPo< 71, "vcmpneh. $vD, $vA, $vB" , v8i16>;
def VCMPNEZH  : VCMP <327, "vcmpnezh $vD, $vA, $vB" , v8i16>;
def VCMPNEZHo : VCMPo<327, "vcmpnezh. $vD, $vA, $vB", v8i16>;

// i32 element comparisons.
def VCMPNEW   : VCMP <135, "vcmpnew $vD, $vA, $vB"  , v4i32>;
def VCMPNEWo  : VCMPo<135, "vcmpnew. $vD, $vA, $vB" , v4i32>;
def VCMPNEZW  : VCMP <391, "vcmpnezw $vD, $vA, $vB" , v4i32>;
def VCMPNEZWo : VCMPo<391, "vcmpnezw. $vD, $vA, $vB", v4i32>;

// VX-Form: [PO VRT / UIM VRB XO].
// We use VXForm_1 to implement it, that is, we use "VRA" (5 bit) to represent
// "/ UIM" (1 + 4 bit)
class VX1_VT5_UIM5_VB5<bits<11> xo, string opc, list<dag> pattern>
  : VXForm_1<xo, (outs vrrc:$vD), (ins u4imm:$UIMM, vrrc:$vB),
             !strconcat(opc, " $vD, $vB, $UIMM"), IIC_VecGeneral, pattern>;

class VX1_RT5_RA5_VB5<bits<11> xo, string opc, list<dag> pattern>
  : VXForm_1<xo, (outs g8rc:$rD), (ins g8rc:$rA, vrrc:$vB),
             !strconcat(opc, " $rD, $rA, $vB"), IIC_VecGeneral, pattern>;

// Vector Extract Unsigned
def VEXTRACTUB : VX1_VT5_UIM5_VB5<525, "vextractub", []>;
def VEXTRACTUH : VX1_VT5_UIM5_VB5<589, "vextractuh", []>;
def VEXTRACTUW : VX1_VT5_UIM5_VB5<653, "vextractuw", []>;
def VEXTRACTD  : VX1_VT5_UIM5_VB5<717, "vextractd" , []>;

// Vector Extract Unsigned Byte/Halfword/Word Left/Right-Indexed
def VEXTUBLX : VX1_RT5_RA5_VB5<1549, "vextublx", []>;
def VEXTUBRX : VX1_RT5_RA5_VB5<1805, "vextubrx", []>;
def VEXTUHLX : VX1_RT5_RA5_VB5<1613, "vextuhlx", []>;
def VEXTUHRX : VX1_RT5_RA5_VB5<1869, "vextuhrx", []>;
def VEXTUWLX : VX1_RT5_RA5_VB5<1677, "vextuwlx", []>;
def VEXTUWRX : VX1_RT5_RA5_VB5<1933, "vextuwrx", []>;

// Vector Insert Element Instructions
def VINSERTB : VX1_VT5_UIM5_VB5<781, "vinsertb", []>;
def VINSERTH : VX1_VT5_UIM5_VB5<845, "vinserth", []>;
def VINSERTW : VX1_VT5_UIM5_VB5<909, "vinsertw", []>;
def VINSERTD : VX1_VT5_UIM5_VB5<973, "vinsertd", []>;

class VX_VT5_EO5_VB5<bits<11> xo, bits<5> eo, string opc, list<dag> pattern>
  : VXForm_RD5_XO5_RS5<xo, eo, (outs vrrc:$vD), (ins vrrc:$vB),
                       !strconcat(opc, " $vD, $vB"), IIC_VecGeneral, pattern>;
// Same encoding but on the vfrc register class.
// NOTE(review): vfrc appears to be the scalar-FP counterpart of vrrc used by
// the isCodeGenOnly "s" variants below — confirm against the register defs.
class VX_VT5_EO5_VB5s<bits<11> xo, bits<5> eo, string opc, list<dag> pattern>
  : VXForm_RD5_XO5_RS5<xo, eo, (outs vfrc:$vD), (ins vfrc:$vB),
                       !strconcat(opc, " $vD, $vB"), IIC_VecGeneral, pattern>;

// Vector Count Leading/Trailing Zero LSB. Result is placed into GPR[rD]
def VCLZLSBB : VXForm_RD5_XO5_RS5<1538, 0, (outs gprc:$rD), (ins vrrc:$vB),
                                  "vclzlsbb $rD, $vB", IIC_VecGeneral,
                                  [(set i32:$rD, (int_ppc_altivec_vclzlsbb
                                     v16i8:$vB))]>;
def VCTZLSBB : VXForm_RD5_XO5_RS5<1538, 1, (outs gprc:$rD), (ins vrrc:$vB),
                                  "vctzlsbb $rD, $vB", IIC_VecGeneral,
                                  [(set i32:$rD, (int_ppc_altivec_vctzlsbb
                                     v16i8:$vB))]>;
// Vector Count Trailing Zeros
def VCTZB : VX_VT5_EO5_VB5<1538, 28, "vctzb",
                           [(set v16i8:$vD, (cttz v16i8:$vB))]>;
def VCTZH : VX_VT5_EO5_VB5<1538, 29, "vctzh",
                           [(set v8i16:$vD, (cttz v8i16:$vB))]>;
def VCTZW : VX_VT5_EO5_VB5<1538, 30, "vctzw",
                           [(set v4i32:$vD, (cttz v4i32:$vB))]>;
def VCTZD : VX_VT5_EO5_VB5<1538, 31, "vctzd",
                           [(set v2i64:$vD, (cttz v2i64:$vB))]>;

// Vector Extend Sign
def VEXTSB2W : VX_VT5_EO5_VB5<1538, 16, "vextsb2w", []>;
def VEXTSH2W : VX_VT5_EO5_VB5<1538, 17, "vextsh2w", []>;
def VEXTSB2D : VX_VT5_EO5_VB5<1538, 24, "vextsb2d", []>;
def VEXTSH2D : VX_VT5_EO5_VB5<1538, 25, "vextsh2d", []>;
def VEXTSW2D : VX_VT5_EO5_VB5<1538, 26, "vextsw2d", []>;
let isCodeGenOnly = 1 in {
  def VEXTSB2Ws : VX_VT5_EO5_VB5s<1538, 16, "vextsb2w", []>;
  def VEXTSH2Ws : VX_VT5_EO5_VB5s<1538, 17, "vextsh2w", []>;
  def VEXTSB2Ds : VX_VT5_EO5_VB5s<1538, 24, "vextsb2d", []>;
  def VEXTSH2Ds : VX_VT5_EO5_VB5s<1538, 25, "vextsh2d", []>;
  def VEXTSW2Ds : VX_VT5_EO5_VB5s<1538, 26, "vextsw2d", []>;
}

// Vector Integer Negate
def VNEGW : VX_VT5_EO5_VB5<1538, 6, "vnegw", []>;
def VNEGD : VX_VT5_EO5_VB5<1538, 7, "vnegd", []>;

// Vector Parity Byte
def VPRTYBW : VX_VT5_EO5_VB5<1538, 8, "vprtybw", [(set v4i32:$vD,
                             (int_ppc_altivec_vprtybw v4i32:$vB))]>;
def VPRTYBD : VX_VT5_EO5_VB5<1538, 9, "vprtybd", [(set v2i64:$vD,
                             (int_ppc_altivec_vprtybd v2i64:$vB))]>;
def VPRTYBQ : VX_VT5_EO5_VB5<1538, 10, "vprtybq", [(set v1i128:$vD,
                             (int_ppc_altivec_vprtybq v1i128:$vB))]>;

// Vector (Bit) Permute (Right-indexed)
def VBPERMD : VXForm_1<1484, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vbpermd $vD, $vA, $vB", IIC_VecFP, []>;
def VPERMR : VAForm_1a<59, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
                       "vpermr $vD, $vA, $vB, $vC", IIC_VecFP, []>;

class VX1_VT5_VA5_VB5<bits<11> xo, string opc, list<dag> pattern>
  : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP, pattern>;

// Vector Rotate Left Mask/Mask-Insert
def VRLWNM : VX1_VT5_VA5_VB5<389, "vrlwnm", []>;
def VRLWMI : VX1_VT5_VA5_VB5<133, "vrlwmi", []>;
def VRLDNM : VX1_VT5_VA5_VB5<453, "vrldnm", []>;
def VRLDMI : VX1_VT5_VA5_VB5<197, "vrldmi", []>;

// Vector Shift Left/Right (variable, per-byte)
def VSLV : VX1_VT5_VA5_VB5<1860, "vslv", []>;
def VSRV : VX1_VT5_VA5_VB5<1796, "vsrv", []>;

// Vector Multiply-by-10 (& Write Carry) Unsigned Quadword
def VMUL10UQ : VXForm_BX<513, (outs vrrc:$vD), (ins vrrc:$vA),
                         "vmul10uq $vD, $vA", IIC_VecFP, []>;
def VMUL10CUQ : VXForm_BX< 1, (outs vrrc:$vD), (ins vrrc:$vA),
                          "vmul10cuq $vD, $vA", IIC_VecFP, []>;

// Vector Multiply-by-10 Extended (& Write Carry) Unsigned Quadword
def VMUL10EUQ  : VX1_VT5_VA5_VB5<577, "vmul10euq" , []>;
def VMUL10ECUQ : VX1_VT5_VA5_VB5< 65, "vmul10ecuq", []>;

// Decimal Integer Format Conversion Instructions

// [PO VRT EO VRB 1 PS XO], "_o" means CR6 is set.
class VX_VT5_EO5_VB5_PS1_XO9_o<bits<5> eo, bits<9> xo, string opc,
                               list<dag> pattern>
  : VX_RD5_EO5_RS5_PS1_XO9<eo, xo, (outs vrrc:$vD), (ins vrrc:$vB, u1imm:$PS),
                           !strconcat(opc, " $vD, $vB, $PS"), IIC_VecFP,
                           pattern> {
  let Defs = [CR6];
}

// [PO VRT EO VRB 1 / XO]
class VX_VT5_EO5_VB5_XO9_o<bits<5> eo, bits<9> xo, string opc,
                           list<dag> pattern>
  : VX_RD5_EO5_RS5_PS1_XO9<eo, xo, (outs vrrc:$vD), (ins vrrc:$vB),
                           !strconcat(opc, " $vD, $vB"), IIC_VecFP, pattern> {
  let Defs = [CR6];
  let PS = 0;
}

// Decimal Convert From/to National/Zoned/Signed-QWord
def BCDCFNo  : VX_VT5_EO5_VB5_PS1_XO9_o<7, 385, "bcdcfn." , []>;
def BCDCFZo  : VX_VT5_EO5_VB5_PS1_XO9_o<6, 385, "bcdcfz." , []>;
def BCDCTNo  : VX_VT5_EO5_VB5_XO9_o    <5, 385, "bcdctn." , []>;
def BCDCTZo  : VX_VT5_EO5_VB5_PS1_XO9_o<4, 385, "bcdctz." , []>;
def BCDCFSQo : VX_VT5_EO5_VB5_PS1_XO9_o<2, 385, "bcdcfsq.", []>;
def BCDCTSQo : VX_VT5_EO5_VB5_XO9_o    <0, 385, "bcdctsq.", []>;

// Decimal Copy-Sign/Set-Sign
let Defs = [CR6] in
def BCDCPSGNo : VX1_VT5_VA5_VB5<833, "bcdcpsgn.", []>;

def BCDSETSGNo : VX_VT5_EO5_VB5_PS1_XO9_o<31, 385, "bcdsetsgn.", []>;

// [PO VRT VRA VRB 1 PS XO], "_o" means CR6 is set.
class VX_VT5_VA5_VB5_PS1_XO9_o<bits<9> xo, string opc, list<dag> pattern>
  : VX_RD5_RSp5_PS1_XO9<xo,
                        (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, u1imm:$PS),
                        !strconcat(opc, " $vD, $vA, $vB, $PS"), IIC_VecFP,
                        pattern> {
  let Defs = [CR6];
}

// [PO VRT VRA VRB 1 / XO]
class VX_VT5_VA5_VB5_XO9_o<bits<9> xo, string opc, list<dag> pattern>
  : VX_RD5_RSp5_PS1_XO9<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                        !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
                        pattern> {
  let Defs = [CR6];
  let PS = 0;
}

// Decimal Shift/Unsigned-Shift/Shift-and-Round
def BCDSo  : VX_VT5_VA5_VB5_PS1_XO9_o<193, "bcds." , []>;
def BCDUSo : VX_VT5_VA5_VB5_XO9_o    <129, "bcdus.", []>;
def BCDSRo : VX_VT5_VA5_VB5_PS1_XO9_o<449, "bcdsr.", []>;

// Decimal (Unsigned) Truncate
def BCDTRUNCo  : VX_VT5_VA5_VB5_PS1_XO9_o<257, "bcdtrunc." , []>;
def BCDUTRUNCo : VX_VT5_VA5_VB5_XO9_o    <321, "bcdutrunc.", []>;
} // end HasP9Altivec