//===- PPCInstrAltivec.td - The PowerPC Altivec Extension --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//

// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLT_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N));
}]>;

def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N);
}], VSPLT_get_imm>;
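// Note: VSPLT_shuffle_mask only matches shuffle masks that splat a single
// element; the attached VSPLT_get_imm xform then rewrites the matched mask
// into the element index used as the UIMM field of the vsplt* instructions.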

// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
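// Each vecspltis* fragment matches a build_vector that splats the same small
// signed immediate at the given element width (1 = byte, 2 = halfword,
// 4 = word); the xform extracts that value for the vspltis* SIMM field.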

class isVDOT {   // vector dot instruction.
  list<Register> Defs = [CR6];
  bit RC = 1;
}
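// Mixing in isVDOT produces the record ("dot") form of a vector compare: the
// RC bit is set and CR6 is listed as a def, since the dot forms summarize the
// comparison result in CR field 6.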

//===----------------------------------------------------------------------===//
// Instruction Definitions.

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;

let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v16i8 (PPClve_x xoaddr:$src)))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v8i16 (PPClve_x xoaddr:$src)))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v4f32 (PPClve_x xoaddr:$src)))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v4f32 (load xoaddr:$src)))]>;
}

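// lvsl/lvsr return a permute control vector based on the low bits of the
// effective address; it is typically fed to vperm to realign unaligned data.
// No patterns are attached here, so these are only selected explicitly.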
def LVSL : XForm_1<31,   6, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsl $vD, $base, $rA", LdStGeneral,
                   []>, PPC970_Unit_LSU;
def LVSR : XForm_1<31,  38, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsr $vD, $base, $rA", LdStGeneral,
                   []>, PPC970_Unit_LSU;

let isStore = 1, noResults = 1, PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvebx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvehx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvewx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(store (v4f32 VRRC:$rS), xoaddr:$dst)]>;
}

let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;
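// Forming vmaddfp/vnmsubfp from separate fmul/fadd nodes fuses the multiply
// and add into a single rounding step, which is why these patterns are
// guarded by the FPContractions predicate.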

def VPERM : VAForm_1<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                     "vperm $vD, $vA, $vB, $vC", VecPerm,
                     [(set VRRC:$vD,
                       (PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC))]>;
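// vsldoi shifts the 32-byte concatenation of vA and vB left by SH bytes and
// returns the most-significant 16 bytes of the result.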
def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                      "vsldoi $vD, $vA, $vB, $SH", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
                                                imm:$SH))]>;

// VX-Form instructions.  AltiVec arithmetic ops.
def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>;
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vaddfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;

def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>;
def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>;
def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>;

def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>;
def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>;
def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>;
def VAND : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vand $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;

def VCFSX : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfsx $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfux $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
                        "vexptefp $vD, $vB", VecFP,
                        []>;
def VLOGEFP : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
                       "vlogefp $vD, $vB", VecFP,
                       []>;
def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmaxfp $vD, $vA, $vB", VecFP,
                      []>;
def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vminfp $vD, $vA, $vB", VecFP,
                      []>;
def VREFP : VXForm_2<266, (ops VRRC:$vD, VRRC:$vB),
                     "vrefp $vD, $vB", VecFP,
                     []>;
def VRFIM : VXForm_2<714, (ops VRRC:$vD, VRRC:$vB),
                     "vrfim $vD, $vB", VecFP,
                     []>;
def VRFIN : VXForm_2<522, (ops VRRC:$vD, VRRC:$vB),
                     "vrfin $vD, $vB", VecFP,
                     []>;
def VRFIP : VXForm_2<650, (ops VRRC:$vD, VRRC:$vB),
                     "vrfip $vD, $vB", VecFP,
                     []>;
def VRFIZ : VXForm_2<586, (ops VRRC:$vD, VRRC:$vB),
                     "vrfiz $vD, $vB", VecFP,
                     []>;
def VRSQRTEFP : VXForm_2<330, (ops VRRC:$vD, VRRC:$vB),
                         "vrsqrtefp $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrsqrtefp VRRC:$vB))]>;
def VSUBCUW : VXForm_1<1408, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubcuw VRRC:$vA, VRRC:$vB))]>;
def VSUBFP : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vsubfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;

def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSUBSBS : VXForm_1<1792, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubsbs VRRC:$vA, VRRC:$vB))]>;
def VSUBSHS : VXForm_1<1856, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubshs VRRC:$vA, VRRC:$vB))]>;
def VSUBSWS : VXForm_1<1920, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubsws VRRC:$vA, VRRC:$vB))]>;

def VSUBUBS : VXForm_1<1536, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsububs VRRC:$vA, VRRC:$vB))]>;
def VSUBUHS : VXForm_1<1600, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubuhs VRRC:$vA, VRRC:$vB))]>;
def VSUBUWS : VXForm_1<1664, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubuws VRRC:$vA, VRRC:$vB))]>;

def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                   "vor $vD, $vA, $vB", VecFP,
                   [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
                                                      VSPLT_shuffle_mask:$UIMM))]>;
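// Only the v4f32 form of the splat shuffle is written on VSPLTW itself; the
// v4i32 form is handled by an explicit pattern near the end of this file.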

def VSPLTISB : VXForm_1<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_1<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_1<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;
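// vspltis* sign-extend the 5-bit SIMM into every element of the destination.
// The patterns above only cover the v4f32-typed build_vector; integer-typed
// forms are matched by the "Immediate vector formation" patterns further down.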

// Altivec Comparisons.

// f32 element comparisons.
def VCMPBFP : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                        "vcmpbfp $vD, $vA, $vB", VecFPCompare,
                        [(set VRRC:$vD,
                          (int_ppc_altivec_vcmpbfp VRRC:$vA, VRRC:$vB))]>;
def VCMPBFPo : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpbfp. $vD, $vA, $vB", VecFPCompare,
                         []>, isVDOT;
def VCMPEQFP : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpeqfp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpeqfp VRRC:$vA, VRRC:$vB))]>;
def VCMPEQFPo : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGEFP : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgefp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpgefp VRRC:$vA, VRRC:$vB))]>;
def VCMPGEFPo : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTFP : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtfp $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpgtfp VRRC:$vA, VRRC:$vB))]>;
def VCMPGTFPo : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;

// i8 element comparisons.
def VCMPEQUB : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequb $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpequb VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUBo : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTSB : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsb $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpgtsb VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSBo : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTUB : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtub $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpgtub VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUBo : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;

// i16 element comparisons.
def VCMPEQUH : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpequh VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUHo : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTSH : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpgtsh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSHo : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTUH : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtuh $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpgtuh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUHo : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;

// i32 element comparisons.
def VCMPEQUW : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpequw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpequw VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUWo : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTSW : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtsw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpgtsw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSWo : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;
def VCMPGTUW : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                         "vcmpgtuw $vD, $vA, $vB", VecFPCompare,
                         [(set VRRC:$vD,
                           (int_ppc_altivec_vcmpgtuw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUWo : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw. $vD, $vA, $vB", VecFPCompare,
                          []>, isVDOT;

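// vxor vD, vD, vD yields all zero bits regardless of the previous register
// contents, so V_SET0 is the canonical way to materialize a zero vector.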
def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                            "vxor $vD, $vD, $vD", VecFP,
                            [(set VRRC:$vD, (v4f32 immAllZerosV))]>;
}

//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//

// Undef/Zero.
def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>;
def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>;
def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>;

// Loads.
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;

// Stores.
def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;

// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
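// All vector types live in VRRC, so a bitconvert between them is a no-op and
// simply reuses the source register; no instruction is emitted.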

// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;

// Logical Operations
def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or VRRC:$A, VRRC:$B)), (v16i8 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or VRRC:$A, VRRC:$B)), (v8i16 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))), (v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))), (v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;

def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
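// Altivec has no stand-alone vector floating-point multiply, so fmul is
// selected as vmaddfp with a zero addend (V_SET0) supplying the add operand.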

// Fused multiply add and multiply sub for packed float.  These are represented
// separately from the real instructions above, for operations that must have
// the additional precision, such as Newton-Raphson (used by divide, sqrt).
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
          (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;

def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;

def : Pat<(v4i32 (PPClve_x xoaddr:$src)),
          (v4i32 (LVEWX xoaddr:$src))>;