//===- PPCInstrAltivec.td - The PowerPC Altivec Extension --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//

// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLT_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N));
}]>;

def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N);
}], VSPLT_get_imm>;
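
// For example, a shuffle mask that selects element 3 of the source for every
// result element is a splat of element 3: VSPLT_shuffle_mask accepts it and
// VSPLT_get_imm turns it into the element-number immediate 3 for vsplt*.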

def vecimm0 : PatLeaf<(build_vector), [{
  return PPC::isZeroVector(N);
}]>;


// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
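
// The second argument to isVecSplatImm is the element size in bytes being
// tested.  For example, a v16i8 build_vector of sixteen 0xFF bytes is a splat
// of the signed 5-bit value -1 at every element width, so it matches
// vecspltisb, vecspltish, and vecspltisw alike.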


//===----------------------------------------------------------------------===//
// Instruction Definitions.

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;

let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v16i8 (PPClve_x xoaddr:$src)))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v8i16 (PPClve_x xoaddr:$src)))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v4f32 (PPClve_x xoaddr:$src)))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v4f32 (load xoaddr:$src)))]>;
}

def LVSL : XForm_1<31,   6, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsl $vD, $base, $rA", LdStGeneral,
                   []>, PPC970_Unit_LSU;
def LVSR : XForm_1<31,  38, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsr $vD, $base, $rA", LdStGeneral,
                   []>, PPC970_Unit_LSU;
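
// lvsl/lvsr compute a permute control vector from the low bits of the
// effective address; combined with two lvx loads and a vperm, they form the
// standard AltiVec sequence for loading a misaligned 16-byte vector.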

let isStore = 1, noResults = 1, PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvebx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvehx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvewx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(store (v4f32 VRRC:$rS), xoaddr:$dst)]>;
}

let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;

def VPERM : VAForm_1<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                     "vperm $vD, $vA, $vB, $vC", VecPerm,
                     [(set VRRC:$vD,
                           (PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC))]>;
def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                      "vsldoi $vD, $vA, $vB, $SH", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
                                                    imm:$SH))]>;
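
// vperm selects each result byte from the 32-byte concatenation vA:vB using
// the low five bits of the corresponding control byte in vC; e.g. a control
// byte of 0x03 picks byte 3 of vA and 0x13 picks byte 3 of vB.  vsldoi takes
// the 16 bytes of vA:vB starting at byte offset SH.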

// VX-Form instructions.  AltiVec arithmetic ops.
def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>;
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vaddfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;
def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>;
def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>;
def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>;
def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>;
def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>;
def VAND : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vand $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;

def VCFSX : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfsx $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfux $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
                        "vexptefp $vD, $vB", VecFP,
                        []>;
def VLOGEFP : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
                       "vlogefp $vD, $vB", VecFP,
                       []>;
def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmaxfp $vD, $vA, $vB", VecFP,
                      []>;
def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vminfp $vD, $vA, $vB", VecFP,
                      []>;
def VREFP : VXForm_2<266, (ops VRRC:$vD, VRRC:$vB),
                     "vrefp $vD, $vB", VecFP,
                     []>;
def VRFIM : VXForm_2<714, (ops VRRC:$vD, VRRC:$vB),
                     "vrfim $vD, $vB", VecFP,
                     []>;
def VRFIN : VXForm_2<522, (ops VRRC:$vD, VRRC:$vB),
                     "vrfin $vD, $vB", VecFP,
                     []>;
def VRFIP : VXForm_2<650, (ops VRRC:$vD, VRRC:$vB),
                     "vrfip $vD, $vB", VecFP,
                     []>;
def VRFIZ : VXForm_2<586, (ops VRRC:$vD, VRRC:$vB),
                     "vrfiz $vD, $vB", VecFP,
                     []>;
def VRSQRTEFP : VXForm_2<330, (ops VRRC:$vD, VRRC:$vB),
                         "vrsqrtefp $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrsqrtefp VRRC:$vB))]>;
def VSUBFP : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vsubfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;
def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                   "vor $vD, $vA, $vB", VecFP,
                   [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
                                                      VSPLT_shuffle_mask:$UIMM))]>;
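
// For example, "vspltw v2, v3, 0" replicates word element 0 of v3 into all
// four word elements of v2; vspltb/vsplth do the same at byte/halfword width.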

def VSPLTISB : VXForm_1<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_1<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_1<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;
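
// The 5-bit SIMM is sign-extended to the element width, so for example
// "vspltisw v2, -16" writes 0xFFFFFFF0 into each word of v2; only splats of
// constants in the range -16..15 (at some element width) can be built this way.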


// VX-Form Pseudo Instructions

def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                            "vxor $vD, $vD, $vD", VecFP,
                            [(set VRRC:$vD, (v4f32 vecimm0))]>;
}
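
// V_SET0 relies on x xor x == 0: "vxor vD, vD, vD" materializes an all-zero
// vector in a single instruction, without a load or a constant pool entry.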

//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//

// Undef/Zero.
def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 vecimm0), (v16i8 (V_SET0))>;
def : Pat<(v8i16 vecimm0), (v8i16 (V_SET0))>;
def : Pat<(v4i32 vecimm0), (v4i32 (V_SET0))>;

// Loads.
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;

// Stores.
def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;

// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
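
// All four vector types live in the same VRRC register file, so a bitconvert
// is a pure reinterpretation of the register and costs no instructions.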

// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;
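
// Each result type is tried against all three splat widths, since a constant
// of one type may only be expressible as a splat at a different element size;
// e.g. an all-ones vector of any type can be built as "vspltisb vD, -1".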

// Logical Operations
def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or VRRC:$A, VRRC:$B)), (v16i8 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or VRRC:$A, VRRC:$B)), (v8i16 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))),(v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))),(v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;

def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
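
// AltiVec has no plain vector fp multiply; a bare fmul is emitted as vmaddfp
// with a freshly zeroed register as the addend, i.e. vA*vB + 0.0.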

// Fused multiply-add and multiply-subtract for packed float.  These are
// represented separately from the real instructions above for operations that
// need the additional precision, such as Newton-Raphson (used by divide and
// sqrt).
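// For example, one Newton-Raphson refinement of the vrefp estimate x0 for
// 1/a can be written with exactly these nodes (illustrative only):
//   e  = PPCvnmsubfp(a, x0, 1.0)    ; e  = 1.0 - a*x0
//   x1 = PPCvmaddfp(x0, e, x0)      ; x1 = x0 + x0*e = x0*(2 - a*x0)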
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
          (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;

def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;

def : Pat<(v4i32 (PPClve_x xoaddr:$src)),
          (v4i32 (LVEWX xoaddr:$src))>;