//======- X86InstrFragmentsSIMD.td - x86 ISA -------------*- tablegen -*-=====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;

def bc_v8i8  : PatFrag<(ops node:$in), (v8i8  (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;

//===----------------------------------------------------------------------===//
// MMX Masks
//===----------------------------------------------------------------------===//

// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUFW imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <0, 4, 2, 5, ...>
def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
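
// Illustrative use of the fragments above (a sketch only; the real
// instruction definitions live in X86InstrMMX.td): a PSHUFW selection
// pattern typically matches mmx_pshufw and binds the transformed mask as
// the instruction's immediate, roughly
//   (set VR64:$dst, (v4i16 (mmx_pshufw:$src2 VR64:$src1, undef)))
// where mmx_pshufw:$src2 yields the 8-bit immediate produced by
// MMX_SHUFFLE_get_shuf_imm.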

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;

def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
                    [SDNPCommutative, SDNPAssociative]>;
def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                       [SDNPHasChain, SDNPMayLoad]>;
def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVT<1, v4f32>,
                                          SDTCisVT<2, v4f32>]>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
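
// Illustrative use (a sketch; the real patterns live in X86InstrSSE.td):
// a scalar 'ss' intrinsic pattern would typically take ssmem as its machine
// memory operand and fold the load through sse_load_f32, roughly
//   (set VR128:$dst, (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2))
// so that the scalar memory operand is folded directly into the instruction.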

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
                               (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;
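
// Illustrative use (a sketch; the real definitions live in X86InstrSSE.td):
// an aligned full-vector load such as MOVAPS would typically be selected
// through one of these fragments, e.g. a pattern roughly of the form
//   (set VR128:$dst, (alignedloadv4f32 addr:$src))
// while unaligned loads (MOVUPS) fall back to the plain loadv4f32 fragment.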

// FIXME: move this to a more appropriate place after all AVX is done.
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload node:$ptr))>;
def alignedloadv8i32 : PatFrag<(ops node:$ptr),
                               (v8i32 (alignedload node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
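
// Illustrative use (a sketch; the real patterns live in X86InstrSSE.td):
// most packed SSE arithmetic folds its memory operand through memop, e.g.
// an ADDPS-style load-and-op pattern is roughly
//   (set VR128:$dst, (v4f32 (fadd VR128:$src1, (memopv4f32 addr:$src2))))
// which matches either an aligned load or, when hasVectorUAMem() is set,
// any load.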

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>;
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;
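
// Illustrative use (a sketch; the real patterns live elsewhere in the X86
// backend): an SSSE3 instruction operating on MMX registers would typically
// fold its memory operand through one of these fragments, roughly
//   (SomeSSSE3Intrinsic VR64:$src1, (bitconvert (memopv8i8 addr:$src2)))
// where SomeSSSE3Intrinsic is a placeholder; only the 8-byte alignment
// checked by memop64 above is required.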

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
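
// Illustrative use (a sketch; the real definitions live in X86InstrSSE.td):
// the streaming-store instructions are typically selected through the
// aligned fragment, e.g. a MOVNTPS-style pattern is roughly
//   (alignednontemporalstore (v4f32 VR128:$src), addr:$dst)
// while unalignednontemporalstore lets unaligned non-temporal stores be
// handled separately (for instance by falling back to ordinary stores).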

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;


def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
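
// For example, with the fragment above a bit count of 64 becomes the byte
// count 8 (64 >> 3); this is typically used where the DAG expresses a shift
// amount in bits but the instruction (e.g. a whole-register byte shift)
// takes a byte immediate.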

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;

def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;

def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def shufp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;

def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;

def palign : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;