//===- PPCInstrAltivec.td - The PowerPC Altivec Extension --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//

// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLT_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N));
}]>;

def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N);
}], VSPLT_get_imm>;
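// For example, a shuffle mask that replicates word element 3 of its input
// (illustrative DAG: (vector_shuffle VRRC:$vB, undef, <3,3,3,3>)) is a splat
// shuffle mask, and VSPLT_get_imm turns it into the immediate 3 for vspltw.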

def vecimm0 : PatLeaf<(build_vector), [{
  return PPC::isZeroVector(N);
}]>;


// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
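// In the three fragments above, the second argument to PPC::isVecSplatImm is
// the element size in bytes (1, 2, or 4); each is expected to match a
// build_vector that splats one value representable as the signed 5-bit
// vspltis* immediate, e.g. (v4i32 <5, 5, 5, 5>) for vspltisw.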

class isVDOT {   // vector dot instruction.
  list<Register> Defs = [CR6];
  bit RC = 1;
}
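// The record (".") forms of the vector compares below mix this in: RC = 1
// sets the record bit in the encoding, and CR6 is listed as an implicit def
// since those instructions summarize the comparison result there.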

//===----------------------------------------------------------------------===//
// Instruction Definitions.

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;

let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v16i8 (PPClve_x xoaddr:$src)))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v8i16 (PPClve_x xoaddr:$src)))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v4f32 (PPClve_x xoaddr:$src)))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v4f32 (load xoaddr:$src)))]>;
}

def LVSL : XForm_1<31,   6, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsl $vD, $base, $rA", LdStGeneral,
                   []>, PPC970_Unit_LSU;
def LVSR : XForm_1<31,  38, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsr $vD, $base, $rA", LdStGeneral,
                   []>, PPC970_Unit_LSU;
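// lvsl/lvsr compute the permute control vector for an unaligned access.  A
// typical unaligned-load sequence (illustrative, rOff16 holding 16) is:
//   lvx   v1, 0, rAddr        ; aligned quadword containing the start
//   lvx   v2, rOff16, rAddr   ; aligned quadword containing the end
//   lvsl  vC, 0, rAddr        ; control vector from the address alignment
//   vperm vD, v1, v2, vC      ; merge the two halves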

let isStore = 1, noResults = 1, PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvebx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvehx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvewx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(store (v4f32 VRRC:$rS), xoaddr:$dst)]>;
}

let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;

def VPERM : VAForm_1<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                     "vperm $vD, $vA, $vB, $vC", VecPerm,
                     [(set VRRC:$vD,
                           (PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC))]>;
def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                      "vsldoi $vD, $vA, $vB, $SH", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
                                                    imm:$SH))]>;
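// vsldoi concatenates vA:vB and extracts 16 bytes starting at byte offset
// $SH; e.g. vsldoi vD, vA, vB, 4 yields bytes 4-15 of vA followed by
// bytes 0-3 of vB.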

// VX-Form instructions.  AltiVec arithmetic ops.
def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>;
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vaddfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;

def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>;
def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>;
def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>;

def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>;
def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>;
def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>;
def VAND : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vand $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;

def VCFSX : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfsx $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfux $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
                        "vexptefp $vD, $vB", VecFP,
                        []>;
def VLOGEFP : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
                       "vlogefp $vD, $vB", VecFP,
                       []>;
def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmaxfp $vD, $vA, $vB", VecFP,
                      []>;
def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vminfp $vD, $vA, $vB", VecFP,
                      []>;
def VREFP : VXForm_2<266, (ops VRRC:$vD, VRRC:$vB),
                     "vrefp $vD, $vB", VecFP,
                     []>;
def VRFIM : VXForm_2<714, (ops VRRC:$vD, VRRC:$vB),
                     "vrfim $vD, $vB", VecFP,
                     []>;
def VRFIN : VXForm_2<522, (ops VRRC:$vD, VRRC:$vB),
                     "vrfin $vD, $vB", VecFP,
                     []>;
def VRFIP : VXForm_2<650, (ops VRRC:$vD, VRRC:$vB),
                     "vrfip $vD, $vB", VecFP,
                     []>;
def VRFIZ : VXForm_2<586, (ops VRRC:$vD, VRRC:$vB),
                     "vrfiz $vD, $vB", VecFP,
                     []>;
def VRSQRTEFP : VXForm_2<330, (ops VRRC:$vD, VRRC:$vB),
                         "vrsqrtefp $vD, $vB", VecFP,
                         [(set VRRC:$vD,(int_ppc_altivec_vrsqrtefp VRRC:$vB))]>;
def VSUBCUW : VXForm_1<1408, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubcuw VRRC:$vA, VRRC:$vB))]>;
def VSUBFP : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vsubfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;

def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSUBSBS : VXForm_1<1792, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubsbs VRRC:$vA, VRRC:$vB))]>;
def VSUBSHS : VXForm_1<1856, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubshs VRRC:$vA, VRRC:$vB))]>;
def VSUBSWS : VXForm_1<1920, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubsws VRRC:$vA, VRRC:$vB))]>;

def VSUBUBS : VXForm_1<1536, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsububs VRRC:$vA, VRRC:$vB))]>;
def VSUBUHS : VXForm_1<1600, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubuhs VRRC:$vA, VRRC:$vB))]>;
def VSUBUWS : VXForm_1<1664, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubuws VRRC:$vA, VRRC:$vB))]>;

def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                   "vor $vD, $vA, $vB", VecFP,
                   [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
                                                      VSPLT_shuffle_mask:$UIMM))]>;

def VSPLTISB : VXForm_1<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_1<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_1<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;
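// vspltis* sign-extends its 5-bit immediate into every element, so e.g.
// "vspltisw $vD, -1" materializes an all-ones vector without a load.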


// Altivec Comparisons.

// f32 element comparisons.
def VCMPBFP : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                        "vcmpbfp $vD, $vA, $vB", VecFPCompare,
                        [(set VRRC:$vD,
                              (int_ppc_altivec_vcmpbfp VRRC:$vA, VRRC:$vB))]>;
def VCMPBFPo : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpbfp. $vD, $vA, $vB", VecFPCompare,
                         []>, isVDOT;
def VCMPEQFP : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpeqfp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpeqfp VRRC:$vA, VRRC:$vB))]>;
def VCMPEQFPo : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGEFP : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgefp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgefp VRRC:$vA, VRRC:$vB))]>;
def VCMPGEFPo : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTFP : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtfp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtfp VRRC:$vA, VRRC:$vB))]>;
def VCMPGTFPo : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;

// i8 element comparisons.
def VCMPEQUB : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequb $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpequb VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUBo : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTSB : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsb $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtsb VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSBo : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTUB : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtub $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtub VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUBo : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;

// i16 element comparisons.
def VCMPEQUH : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpequh VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUHo : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTSH : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtsh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSHo : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTUH : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtuh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtuh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUHo : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;

// i32 element comparisons.
def VCMPEQUW : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpequw VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUWo : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTSW : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtsw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSWo : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTUW : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtuw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                               (int_ppc_altivec_vcmpgtuw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUWo : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
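// The record forms above set CR6 with a summary of the element-wise results
// (roughly: one bit for "relation held in all elements", another for
// "relation held in no element"), so branches can test all/none directly.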

def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                            "vxor $vD, $vD, $vD", VecFP,
                            [(set VRRC:$vD, (v4f32 vecimm0))]>;
}

//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//

// Undef/Zero.
def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 vecimm0), (v16i8 (V_SET0))>;
def : Pat<(v8i16 vecimm0), (v8i16 (V_SET0))>;
def : Pat<(v4i32 vecimm0), (v4i32 (V_SET0))>;

// Loads.
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;

// Stores.
def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;

// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
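// Since all vector types share the VRRC register class, each bitconvert is
// a no-op: the patterns just retype the source register without emitting
// an instruction.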

// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;

// Logical Operations
def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or VRRC:$A, VRRC:$B)), (v16i8 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or VRRC:$A, VRRC:$B)), (v8i16 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))),(v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))),(v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;

def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
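// AltiVec has no plain fp multiply, so fmul is selected as vmaddfp with a
// zero addend: a*b is computed as a*b + 0.0 (V_SET0).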

// Fused multiply-add and multiply-subtract for packed float.  These are
// represented separately from the real instructions above, for operations
// that must have the additional precision, such as Newton-Raphson (used by
// divide and sqrt).
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
          (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;

def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
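// vperm picks each result byte from the 32-byte concatenation vA:vB, using
// the low five bits of the corresponding control byte in vC.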

def : Pat<(v4i32 (PPClve_x xoaddr:$src)),
          (v4i32 (LVEWX xoaddr:$src))>;
