//======- X86InstrFragmentsSIMD.td - x86 ISA -------------*- tablegen -*-=====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;

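// 64-bit MMX bitconvert pattern fragments.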
def bc_v8i8  : PatFrag<(ops node:$in), (v8i8  (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;

//===----------------------------------------------------------------------===//
// MMX Masks
//===----------------------------------------------------------------------===//

// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUFW imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <0, 4, 2, 5, ...>
def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

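// Patterns for MMX PSHUFW; the shuffle mask is converted to the PSHUFW
// immediate by MMX_SHUFFLE_get_shuf_imm.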
def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad]>;
def X86vshl    : SDNode<"X86ISD::VSHL",      SDTIntShiftOp>;
def X86vshr    : SDNode<"X86ISD::VSRL",      SDTIntShiftOp>;
def X86cmpps   : SDNode<"X86ISD::CMPPS",     SDTX86VFCMP>;
def X86cmppd   : SDNode<"X86ISD::CMPPD",     SDTX86VFCMP>;
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVT<1, v4f32>,
                                          SDTCisVT<2, v4f32>]>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

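// 128-bit full-vector load pattern fragments.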
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
                               (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload node:$ptr))>;
def alignedloadv8i32 : PatFrag<(ops node:$ptr),
                               (v8i32 (alignedload node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

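// Scalar and 128-bit vector memop pattern fragments.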
def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;

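// 128-bit bitconvert pattern fragments.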
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

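// Fragments for a scalar load zero-extended into the low element of a vector
// (an X86vzmovl of a scalar_to_vector load), viewed through a bitconvert.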
def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;

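// A 32-bit floating-point immediate that is exactly +0.0.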
def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;

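// A shuffle that splats element 0 of its first operand.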
def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;

def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def shufp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;

def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;

def palign : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;