//======- X86InstrFragmentsSIMD.td - x86 ISA -------------*- tablegen -*-=====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;

def bc_v8i8  : PatFrag<(ops node:$in), (v8i8  (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;

//===----------------------------------------------------------------------===//
// MMX Masks
//===----------------------------------------------------------------------===//

// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUFW imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
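// For example (assuming the usual SHUF immediate encoding, where result
// element i is selected by bits [2i+1:2i] of the immediate), the identity
// mask <0, 1, 2, 3> would encode as (3 << 6) | (2 << 4) | (1 << 2) | 0 = 0xE4.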

// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <0, 4, 1, 5, ...>
def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
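// A fragment with an SDNodeXForm attached is used by binding the fragment to
// an immediate operand in an instruction pattern. A sketch of what a PSHUFW
// selection pattern might look like (illustrative only; the real definition
// lives in the MMX instruction file):
//   [(set VR64:$dst, (v4i16 (mmx_pshufw:$src2 VR64:$src1, (undef))))]
// where binding mmx_pshufw to $src2 runs MMX_SHUFFLE_get_shuf_imm to produce
// the instruction's 8-bit immediate.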

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad]>;
def X86vshl    : SDNode<"X86ISD::VSHL",      SDTIntShiftOp>;
def X86vshr    : SDNode<"X86ISD::VSRL",      SDTIntShiftOp>;
def X86cmpps   : SDNode<"X86ISD::CMPPS",     SDTX86VFCMP>;
def X86cmppd   : SDNode<"X86ISD::CMPPD",     SDTX86VFCMP>;
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVT<1, v4f32>,
                                          SDTCisVT<2, v4f32>]>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements.  These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
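// A minimal sketch of the intent: SelectScalarSSELoad is expected to match
// DAGs of the form
//   (v4f32 (scalar_to_vector (loadf32 addr)))
// so that scalar 'ss'/'sd' arithmetic can fold the load while the upper
// vector elements are treated as zeroed.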

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
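// Note: the five MIOperandInfo sub-operands above follow the standard x86
// memory-operand layout: base register, scale immediate, index register,
// displacement, and segment register.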

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
                               (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload node:$ptr))>;
def alignedloadv8i32 : PatFrag<(ops node:$ptr),
                               (v8i32 (alignedload node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.  If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
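// A representative (hypothetical) use: a packed-add selection pattern can
// fold its memory operand through one of these fragments, e.g.
//   [(set VR128:$dst, (v4f32 (fadd VR128:$src1, (memopv4f32 addr:$src2))))]
// which only matches when the load satisfies the alignment predicate above.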

// FIXME: move this to a more appropriate place after all AVX is done.
def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>;
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
def memopv8i32 : PatFrag<(ops node:$ptr), (v8i32 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions, and MMX operands are not
// guaranteed to be aligned on a 16-byte boundary, so only require 8-byte
// alignment here.
// FIXME: 8-byte alignment for MMX reads is not actually required.
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set.
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
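// These fragments let the streaming-store patterns split cleanly by
// alignment: for example, an aligned v4f32 non-temporal store would
// naturally select MOVNTPS, while the unaligned form must fall back to
// instructions without an alignment requirement (a sketch of the intent,
// not a binding contract).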

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
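// For example, a shift amount expressed in bits becomes a byte count:
// 32 >> 3 = 4, which is what byte-granular instructions such as PSLLDQ and
// PSRLDQ expect in their immediate field.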

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;
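// For example (assuming the immediate counts the bytes shifted out of the
// concatenated source pair): a v4i32 shuffle mask of <1, 2, 3, 4> would
// correspond to a PALIGNR immediate of 4, i.e. one 4-byte element.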

def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;
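// For example: the v4i32 mask <0, 0, 0, 0>, which broadcasts element 0 of
// the first operand into every lane.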

def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;
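// As with the MMX fragment above, a sketch of how pshufd and its xform might
// appear in a selection pattern (illustrative; the real patterns live in the
// SSE instruction definitions):
//   [(set VR128:$dst, (v4i32 (pshufd:$src2 VR128:$src1, (undef))))]
// where binding pshufd to $src2 runs SHUFFLE_get_shuf_imm to compute the
// instruction's 8-bit immediate.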

def shufp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;

def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;

def palign : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;