//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;

def X86fmin   : SDNode<"X86ISD::FMIN",   SDTFPBinOp>;
def X86fmax   : SDNode<"X86ISD::FMAX",   SDTFPBinOp>;
def X86fand   : SDNode<"X86ISD::FAND",   SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for    : SDNode<"X86ISD::FOR",    SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor   : SDNode<"X86ISD::FXOR",   SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp   : SDNode<"X86ISD::FRCP",   SDTFPUnaryOp>;
def X86fsrl   : SDNode<"X86ISD::FSRL",   SDTX86FPShiftOp>;
def X86comi   : SDNode<"X86ISD::COMI",   SDTX86CmpTest>;
def X86ucomi  : SDNode<"X86ISD::UCOMI",  SDTX86CmpTest>;
def X86s2vec  : SDNode<"X86ISD::S2VEC",  SDTypeProfile<1, 1, []>, []>;
def X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>;
def X86pinsrw : SDNode<"X86ISD::PINSRW", SDTypeProfile<1, 3, []>, []>;

//===----------------------------------------------------------------------===//
// SSE 'Special' Instructions
//===----------------------------------------------------------------------===//

let isImplicitDef = 1 in {
def IMPLICIT_DEF_VR128 : I<0, Pseudo, (outs VR128:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set VR128:$dst, (v4f32 (undef)))]>,
                         Requires<[HasSSE1]>;
def IMPLICIT_DEF_FR32  : I<0, Pseudo, (outs FR32:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set FR32:$dst, (undef))]>, Requires<[HasSSE1]>;
def IMPLICIT_DEF_FR64  : I<0, Pseudo, (outs FR64:$dst), (ins),
                           "#IMPLICIT_DEF $dst",
                           [(set FR64:$dst, (undef))]>, Requires<[HasSSE2]>;
}

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
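// The MIOperandInfo operands above follow the usual X86 memory-operand layout:
// base register, scale immediate, index register, and displacement.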

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.
// FIXME: Actually implement support for targets that don't require the
// alignment. This probably wants a subtarget predicate.
def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. Their memory operands are
// not required to be aligned on a 16-byte boundary.
def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 8;
  return false;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def PSxLDQ_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getValue() >> 3);
}]>;
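// The >> 3 above presumably converts a shift amount expressed in bits into the
// byte count expected by the PSLLDQ/PSRLDQ-style immediates.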

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
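// For example, a v4f32 mask of <3, 2, 1, 0> (reverse the elements) encodes as
// two bits per element with element 0 in the low bits, i.e. 0b00011011 (0x1B).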

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

def SSE_splat_mask : PatLeaf<(build_vector), [{
  return X86::isSplatMask(N);
}], SHUFFLE_get_shuf_imm>;

def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
  return X86::isSplatLoMask(N);
}]>;

def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;

def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPS_v_undef_Mask(N);
}]>;

def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHPMask(N);
}]>;

def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLPMask(N);
}]>;

def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSHDUPMask(N);
}]>;

def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSLDUPMask(N);
}]>;

def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;

def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFHWMask(N);
}], SHUFFLE_get_pshufhw_imm>;

def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFLWMask(N);
}], SHUFFLE_get_pshuflw_imm>;

def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation. These are expanded
// by the scheduler into a branch sequence.
let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}
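// A sketch of the expansion: the custom inserter turns each CMOV_* pseudo into
// a small diamond of basic blocks, branching on the condition in EFLAGS and
// then PHI-ing the $t and $f values back together, since there is no SSE
// conditional-move instruction for these register classes.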

//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "cvtsi2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "cvtsi2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si
                                           (load addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi
                                           (load addr:$src)))]>;
def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi
                                           (load addr:$src)))]>;
let isTwoAddress = 1 in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              VR64:$src2))]>;
  def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              (load addr:$src2)))]>;
}

// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si (load addr:$src)))]>;

let isTwoAddress = 1 in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              (loadi32 addr:$src2)))]>;
}

// Comparison instructions
let isTwoAddress = 1 in {
let neverHasSideEffects = 1 in
  def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
let neverHasSideEffects = 1, mayLoad = 1 in
  def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
  def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                         VR128:$src, imm:$cc))]>;
  def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                         (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
                       (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
                        (implicit EFLAGS)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
                       (ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
                      (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), VR128:$src2),
                       (implicit EFLAGS)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
                      (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]
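// Note: the COMISS forms signal an invalid-operation exception for QNaN
// operands, while the UCOMISS forms only do so for SNaNs; the EFLAGS results
// are otherwise identical.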

// Aliases of packed SSE1 instructions for scalar use. These all have names that
// start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
               Requires<[HasSSE1]>, TB, OpSize;

// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
let neverHasSideEffects = 1 in
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
let isSimpleLoad = 1 in
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
  def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
  def FsORPSrr  : PSI<0x56, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
  def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}

def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fand FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsORPSrm  : PSI<0x56, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86for FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fxor FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
let neverHasSideEffects = 1 in {
def FsANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;

let mayLoad = 1 in
def FsANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;
}
}

/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let isTwoAddress = 1 in {
multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F32Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
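// Each defm above expands to six instructions; e.g. 'defm ADD' yields
// ADDSSrr, ADDSSrm, ADDPSrr, ADDPSrm, ADDSSrr_Int and ADDSSrm_Int.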

/// sse1_fp_binop_rm - Other SSE1 binops
///
/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let isTwoAddress = 1 in {
multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            bit Commutable = 0> {

  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, (load addr:$src2)))]>;
}
}

defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse_max_ss, int_x86_sse_max_ps>;
defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse_min_ss, int_x86_sse_min_ps>;

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions

// Move Instructions
let neverHasSideEffects = 1 in
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;

let neverHasSideEffects = 1 in
def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1 in
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS load and store
let isSimpleLoad = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;

let isTwoAddress = 1 in {
  let AddedComplexity = 20 in {
  def MOVLPSrm : PSI<0x12, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                     "movlps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v4f32 (vector_shuffle VR128:$src1,
                       (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                       MOVLP_shuffle_mask)))]>;
  def MOVHPSrm : PSI<0x16, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                     "movhps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v4f32 (vector_shuffle VR128:$src1,
                       (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                       MOVHP_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle
                                         (bc_v2f64 (v4f32 VR128:$src)), (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;

let isTwoAddress = 1 in {
let AddedComplexity = 15 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movlhps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHP_shuffle_mask)))]>;

def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movhlps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHLPS_shuffle_mask)))]>;
} // AddedComplexity
} // isTwoAddress



// Arithmetic

/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode,
                           Intrinsic F32Int,
                           Intrinsic V4F32Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;

  // Vector intrinsic operation, reg
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (load addr:$src)))]>;
}

// Square root.
defm SQRT  : sse1_fp_unop_rm<0x51, "sqrt",  fsqrt,
                             int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
                             int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
defm RCP   : sse1_fp_unop_rm<0x53, "rcp",   X86frcp,
                             int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
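// For example, one Newton-Raphson step on the rsqrt estimate,
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0),
// is typically enough to refine the ~12-bit hardware approximation to nearly
// full single precision.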

// Logical
let isTwoAddress = 1 in {
  let isCommutable = 1 in {
    def ANDPSrr : PSI<0x54, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (and VR128:$src1, VR128:$src2)))]>;
    def ORPSrr  : PSI<0x56, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (or VR128:$src1, VR128:$src2)))]>;
    def XORPSrr : PSI<0x57, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (v2i64
                                         (xor VR128:$src1, VR128:$src2)))]>;
  }

  def ANDPSrm : PSI<0x54, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
                                           (memopv2i64 addr:$src2)))]>;
  def ORPSrm  : PSI<0x56, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
                                          (memopv2i64 addr:$src2)))]>;
  def XORPSrm : PSI<0x57, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                           (memopv2i64 addr:$src2)))]>;
  def ANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2i64 (and (xor VR128:$src1,
                                        (bc_v2i64 (v4i32 immAllOnesV))),
                                   VR128:$src2)))]>;
  def ANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
                                        (bc_v2i64 (v4i32 immAllOnesV))),
                                   (memopv2i64 addr:$src2))))]>;
}

let isTwoAddress = 1 in {
  def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                       "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                          VR128:$src, imm:$cc))]>;
  def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
                       "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
                                          (load addr:$src), imm:$cc))]>;
}

// Shuffle and unpack instructions
let isTwoAddress = 1 in {
  let isConvertibleToThreeAddress = 1 in // Convert to pshufd
  def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1,
                         VR128:$src2, i32i8imm:$src3),
                        "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v4f32 (vector_shuffle
                                  VR128:$src1, VR128:$src2,
                                  SHUFP_shuffle_mask:$src3)))]>;
  def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                         f128mem:$src2, i32i8imm:$src3),
                        "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v4f32 (vector_shuffle
                                  VR128:$src1, (memopv4f32 addr:$src2),
                                  SHUFP_shuffle_mask:$src3)))]>;

  let AddedComplexity = 10 in {
    def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpckhps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpckhps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, (memopv4f32 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;

    def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpcklps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
    def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpcklps\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v4f32 (vector_shuffle
                                   VR128:$src1, (memopv4f32 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

// Mask creation
def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "movmskps\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "movmskpd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;

// Prefetching loads.
// TODO: no intrinsics for these?
def PREFETCHT0  : PSI<0x18, MRM1m, (outs), (ins i8mem:$src), "prefetcht0\t$src", []>;
def PREFETCHT1  : PSI<0x18, MRM2m, (outs), (ins i8mem:$src), "prefetcht1\t$src", []>;
def PREFETCHT2  : PSI<0x18, MRM3m, (outs), (ins i8mem:$src), "prefetcht2\t$src", []>;
def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src), "prefetchnta\t$src", []>;

// Non-temporal stores
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntps\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;

// Load, store, and memory fence
def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;

// MXCSR register
def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;

// Alias instructions that map zero vector to pxor / xorp* for sse.
let isReMaterializable = 1 in
def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
                 "xorps\t$dst, $dst",
                 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000957
958// FR32 to 128-bit vector conversion.
Evan Chengb783fa32007-07-19 01:14:50 +0000959def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000960 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000961 [(set VR128:$dst,
962 (v4f32 (scalar_to_vector FR32:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000963def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000964 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000965 [(set VR128:$dst,
966 (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;
967
// FIXME: may not be able to eliminate this movss with coalescing since the
// src and dest register classes are different. We really want to write this
// pattern like this:
// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
//           (f32 FR32:$src)>;
def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
                                                      (iPTR 0)))]>;
def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
                     "movss\t{$src, $dst|$dst, $src}",
                     [(store (f32 (vector_extract (v4f32 VR128:$src),
                                                  (iPTR 0))), addr:$dst)]>;


// Move to lower bits of a VR128, leaving upper bits alone.
// Three operand (but two address) aliases.
let isTwoAddress = 1 in {
let neverHasSideEffects = 1 in
  def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
                        "movss\t{$src2, $dst|$dst, $src2}", []>;

  let AddedComplexity = 15 in
    def MOVLPSrr : SSI<0x10, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "movss\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                                 MOVL_shuffle_mask)))]>;
}

// Move to lower bits of a VR128, zeroing upper bits.
// Loading from memory automatically zeroes the upper bits.
let AddedComplexity = 20 in
def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
                      "movss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle immAllZerosV_bc,
                        (v4f32 (scalar_to_vector (loadf32 addr:$src))),
                        MOVL_shuffle_mask)))]>;


//===----------------------------------------------------------------------===//
// SSE2 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(set FR64:$dst, (loadf64 addr:$src))]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
                      "cvttsd2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
def CVTSI2SDrr  : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SDrm  : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
                      "cvtsi2sd\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// SSE2 instructions with XS prefix
def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                 Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                 Requires<[HasSSE2]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                         "cvtsd2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse2_cvtsd2si
                                           (load addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                         "cvtpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtpd2pi
                                           (load addr:$src)))]>;
def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                         "cvttpd2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttpd2pi
                                           (load addr:$src)))]>;
def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                         "cvtpi2pd\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                         "cvtpi2pd\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse_cvtpi2pd
                                            (load addr:$src)))]>;

// Aliases for intrinsics
def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse2_cvttsd2si VR128:$src))]>;
def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
                          "cvttsd2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst, (int_x86_sse2_cvttsd2si
                                            (load addr:$src)))]>;

// Comparison instructions
let isTwoAddress = 1, neverHasSideEffects = 1 in {
  def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
                      (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
                      "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
  def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
                      (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
                      "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
                   "ucomisd\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
                    (implicit EFLAGS)]>;
}

// Aliases to match intrinsics which expect XMM operand(s).
let isTwoAddress = 1 in {
  def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                      "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                         VR128:$src, imm:$cc))]>;
  def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
                      "cmp${cc}sd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
                                         (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
                        (implicit EFLAGS)]>;
def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                       "ucomisd\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
                       (implicit EFLAGS)]>;
def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                      "comisd\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases of packed SSE2 instructions for scalar use. These all have names that
// start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
               Requires<[HasSSE2]>, TB, OpSize;

// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
// disregarded.
let neverHasSideEffects = 1 in
def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                     "movapd\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
// disregarded.
let isSimpleLoad = 1 in
def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let isTwoAddress = 1 in {
let isCommutable = 1 in {
  def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                      "andpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
  def FsORPDrr  : PDI<0x56, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                      "orpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
  def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                      "xorpd\t{$src2, $dst|$dst, $src2}",
                      [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
}

def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                    "andpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86fand FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;
def FsORPDrm  : PDI<0x56, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                    "orpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86for FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;
def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                    "xorpd\t{$src2, $dst|$dst, $src2}",
                    [(set FR64:$dst, (X86fxor FR64:$src1,
                                      (memopfsf64 addr:$src2)))]>;

let neverHasSideEffects = 1 in {
def FsANDNPDrr : PDI<0x55, MRMSrcReg,
                     (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}", []>;
let mayLoad = 1 in
def FsANDNPDrm : PDI<0x55, MRMSrcMem,
                     (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}", []>;
}
}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001212
1213/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
1214///
1215/// In addition, we also have a special variant of the scalar form here to
1216/// represent the associated intrinsic operation. This form is unlike the
1217/// plain scalar form, in that it takes an entire vector (instead of a scalar)
1218/// and leaves the top elements undefined.
1219///
1220/// These three forms can each be reg+reg or reg+mem, so there are a total of
1221/// six "instructions".
1222///
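/// For example, the "defm ADD" instantiation below expands (TableGen prefixes
/// each def in the multiclass with the defm name) into ADDSDrr, ADDSDrm,
/// ADDPDrr, ADDPDrm, ADDSDrr_Int and ADDSDrm_Int.
///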
let isTwoAddress = 1 in {
multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F64Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1,
                                               sse_load_f64:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;

/// sse2_fp_binop_rm - Other SSE2 binops
///
/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
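/// For example, the "defm MAX" instantiation below yields MAXSDrr, MAXSDrm,
/// MAXPDrr, MAXPDrm, MAXSDrr_Int, MAXSDrm_Int, MAXPDrr_Int and MAXPDrm_Int.
///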
let isTwoAddress = 1 in {
multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F64Int,
                            Intrinsic V2F64Int,
                            bit Commutable = 0> {

  // Scalar operation, reg+reg.
  def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
                 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
                     !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F64Int VR128:$src1,
                                               sse_load_f64:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V2F64Int VR128:$src1, (load addr:$src2)))]>;
}
}

defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse2_min_sd, int_x86_sse2_min_pd>;

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions

// Move Instructions
let neverHasSideEffects = 1 in
def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;

def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;

let neverHasSideEffects = 1 in
def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1 in
def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv2f64 addr:$src))]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPD load and store
def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;

let isTwoAddress = 1 in {
  let AddedComplexity = 20 in {
    def MOVLPDrm : PDI<0x12, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movlpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2f64 (vector_shuffle VR128:$src1,
                                 (scalar_to_vector (loadf64 addr:$src2)),
                                 MOVLP_shuffle_mask)))]>;
    def MOVHPDrm : PDI<0x16, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movhpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v2f64 (vector_shuffle VR128:$src1,
                                 (scalar_to_vector (loadf64 addr:$src2)),
                                 MOVHP_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress

def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle VR128:$src, (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                     addr:$dst)]>;

// SSE2 instructions without OpSize prefix
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// SSE2 instructions with XS prefix
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (load addr:$src)))]>;
// SSE2 packed instructions with XS prefix
def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, Requires<[HasSSE2]>;
def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (load addr:$src)))]>,
                      XS, Requires<[HasSSE2]>;

// SSE2 packed instructions with XD prefix
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (load addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (load addr:$src)))]>;

// SSE2 instructions without OpSize prefix
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;

def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (load addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
// Aliases for intrinsics
let isTwoAddress = 1 in {
def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                        "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
                                           GR32:$src2))]>;
def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                        "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
                                           (loadi32 addr:$src2)))]>;
def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                           VR128:$src2))]>;
def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                        "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
                                           (load addr:$src2)))]>;
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                         VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                         (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
}

// Arithmetic

/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
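/// For example, the "defm SQRT" instantiation below yields SQRTSDr, SQRTSDm,
/// SQRTPDr, SQRTPDm, SQRTSDr_Int, SQRTSDm_Int, SQRTPDr_Int and SQRTPDm_Int.
///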
multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode,
                           Intrinsic F64Int,
                           Intrinsic V2F64Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode FR64:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;

  // Vector intrinsic operation, reg
  def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem
  def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int (load addr:$src)))]>;
}

// Square root.
defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
                            int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;

// There is no f64 version of the reciprocal approximation instructions.

// Logical
let isTwoAddress = 1 in {
  let isCommutable = 1 in {
    def ANDPDrr : PDI<0x54, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "andpd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (and (bc_v2i64 (v2f64 VR128:$src1)),
                             (bc_v2i64 (v2f64 VR128:$src2))))]>;
    def ORPDrr  : PDI<0x56, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "orpd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (or (bc_v2i64 (v2f64 VR128:$src1)),
                            (bc_v2i64 (v2f64 VR128:$src2))))]>;
    def XORPDrr : PDI<0x57, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "xorpd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (xor (bc_v2i64 (v2f64 VR128:$src1)),
                             (bc_v2i64 (v2f64 VR128:$src2))))]>;
  }

  def ANDPDrm : PDI<0x54, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "andpd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (and (bc_v2i64 (v2f64 VR128:$src1)),
                           (memopv2i64 addr:$src2)))]>;
  def ORPDrm  : PDI<0x56, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "orpd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (or (bc_v2i64 (v2f64 VR128:$src1)),
                          (memopv2i64 addr:$src2)))]>;
  def XORPDrm : PDI<0x57, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                    "xorpd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (xor (bc_v2i64 (v2f64 VR128:$src1)),
                           (memopv2i64 addr:$src2)))]>;
  def ANDNPDrr : PDI<0x55, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                            (bc_v2i64 (v2f64 VR128:$src2))))]>;
  def ANDNPDrm : PDI<0x55, MRMSrcMem,
                     (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     "andnpd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
                            (memopv2i64 addr:$src2)))]>;
}

let isTwoAddress = 1 in {
  def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                       "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                          VR128:$src, imm:$cc))]>;
  def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
                       "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
                                          (load addr:$src), imm:$cc))]>;
}

// Shuffle and unpack instructions
let isTwoAddress = 1 in {
  def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
                        "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst, (v2f64 (vector_shuffle
                                                  VR128:$src1, VR128:$src2,
                                                  SHUFP_shuffle_mask:$src3)))]>;
  def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                         f128mem:$src2, i8imm:$src3),
                        "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (v2f64 (vector_shuffle
                                  VR128:$src1, (memopv2f64 addr:$src2),
                                  SHUFP_shuffle_mask:$src3)))]>;

  let AddedComplexity = 10 in {
    def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpckhpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
    def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpckhpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, (memopv2f64 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;

    def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "unpcklpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
    def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                         "unpcklpd\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2f64 (vector_shuffle
                                   VR128:$src1, (memopv2f64 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;
  } // AddedComplexity
} // isTwoAddress


//===----------------------------------------------------------------------===//
// SSE integer instructions

// Move Instructions
let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, mayLoad = 1 in
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
let mayStore = 1 in
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
let isSimpleLoad = 1, mayLoad = 1 in
def MOVDQUrm :   I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
                 XS, Requires<[HasSSE2]>;
let mayStore = 1 in
def MOVDQUmr :   I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqu\t{$src, $dst|$dst, $src}",
                   [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
                 XS, Requires<[HasSSE2]>;

// Intrinsic forms of MOVDQU load and store
let isSimpleLoad = 1 in
def MOVDQUrm_Int :   I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movdqu\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
                     XS, Requires<[HasSSE2]>;
def MOVDQUmr_Int :   I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                       "movdqu\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                     XS, Requires<[HasSSE2]>;

let isTwoAddress = 1 in {

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
}

multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr, Intrinsic IntId> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (IntId VR128:$src1,
                                    (scalar_to_vector (i32 imm:$src2))))]>;
}


/// PDI_binop_rm - Simple SSE2 binary operator.
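/// For example, "defm PADDB" below yields PADDBrr and PADDBrm operating on
/// v16i8; the memory operand is loaded as a v2i64 and bitconverted to OpVT.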
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                        (bitconvert (memopv2i64 addr:$src2)))))]>;
}

/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
///
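/// For example, "defm PADDQ" below yields PADDQrr and PADDQrm, whose memory
/// pattern uses memopv2i64 directly with no bitconvert wrapper.
///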
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}

} // isTwoAddress

// 128-bit Integer Arithmetic

defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;

defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;

defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;

defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;

defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;

defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;

defm PAVGB  : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW  : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;


defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
                                        1859defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
1860
1861
1862defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw", int_x86_sse2_psll_w>;
1863defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld", int_x86_sse2_psll_d>;
1864defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq", int_x86_sse2_psll_q>;
1865
1866defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw", int_x86_sse2_psrl_w>;
1867defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld", int_x86_sse2_psrl_d>;
1868defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq", int_x86_sse2_psrl_q>;
1869
1870defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw", int_x86_sse2_psra_w>;
1871defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad", int_x86_sse2_psra_d>;
1872// PSRAQ doesn't exist in SSE[1-3].
1873
1874// 128-bit logical shifts.
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001875let isTwoAddress = 1, neverHasSideEffects = 1 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001876 def PSLLDQri : PDIi8<0x73, MRM7r,
Evan Chengb783fa32007-07-19 01:14:50 +00001877 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001878 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001879 def PSRLDQri : PDIi8<0x73, MRM3r,
Evan Chengb783fa32007-07-19 01:14:50 +00001880 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001881 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001882 // PSRADQri doesn't exist in SSE[1-3].
1883}
1884
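// Note on the patterns below: pslldq/psrldq shift by whole bytes, and the
// PSxLDQ_imm transform applied to the immediate (defined earlier in this file)
// is assumed to convert the intrinsics' bit count into the byte count the
// instructions expect.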
1885let Predicates = [HasSSE2] in {
1886 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
1887 (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1888 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
1889 (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1890 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
1891 (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1892}
1893
1894// Logical
1895defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
1896defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
1897defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
1898
1899let isTwoAddress = 1 in {
1900 def PANDNrr : PDI<0xDF, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001901 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001902 "pandn\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001903 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
1904 VR128:$src2)))]>;
1905
1906 def PANDNrm : PDI<0xDF, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001907 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001908 "pandn\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001909 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
Dan Gohman7dc19012007-08-02 21:17:01 +00001910 (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001911}
1912
1913// SSE2 Integer comparison
1914defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
1915defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
1916defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
1917defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
1918defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
1919defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
1920
1921// Pack instructions
1922defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
1923defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
1924defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
1925
1926// Shuffle and unpack instructions
1927def PSHUFDri : PDIi8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001928 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001929 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001930 [(set VR128:$dst, (v4i32 (vector_shuffle
1931 VR128:$src1, (undef),
1932 PSHUFD_shuffle_mask:$src2)))]>;
1933def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001934 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001935 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001936 [(set VR128:$dst, (v4i32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00001937 (bc_v4i32(memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001938 (undef),
1939 PSHUFD_shuffle_mask:$src2)))]>;
1940
1941// SSE2 with ImmT == Imm8 and XS prefix.
1942def PSHUFHWri : Ii8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001943 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001944 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001945 [(set VR128:$dst, (v8i16 (vector_shuffle
1946 VR128:$src1, (undef),
1947 PSHUFHW_shuffle_mask:$src2)))]>,
1948 XS, Requires<[HasSSE2]>;
1949def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001950 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001951 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001952 [(set VR128:$dst, (v8i16 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00001953 (bc_v8i16 (memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001954 (undef),
1955 PSHUFHW_shuffle_mask:$src2)))]>,
1956 XS, Requires<[HasSSE2]>;
1957
1958// SSE2 with ImmT == Imm8 and XD prefix.
1959def PSHUFLWri : Ii8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001960 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001961 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001962 [(set VR128:$dst, (v8i16 (vector_shuffle
1963 VR128:$src1, (undef),
1964 PSHUFLW_shuffle_mask:$src2)))]>,
1965 XD, Requires<[HasSSE2]>;
1966def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001967 (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001968 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001969 [(set VR128:$dst, (v8i16 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00001970 (bc_v8i16 (memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001971 (undef),
1972 PSHUFLW_shuffle_mask:$src2)))]>,
1973 XD, Requires<[HasSSE2]>;
1974
1975
1976let isTwoAddress = 1 in {
1977 def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001978 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001979 "punpcklbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001980 [(set VR128:$dst,
1981 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
1982 UNPCKL_shuffle_mask)))]>;
1983 def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001984 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001985 "punpcklbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001986 [(set VR128:$dst,
1987 (v16i8 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00001988 (bc_v16i8 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001989 UNPCKL_shuffle_mask)))]>;
1990 def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001991 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001992 "punpcklwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001993 [(set VR128:$dst,
1994 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
1995 UNPCKL_shuffle_mask)))]>;
1996 def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001997 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001998 "punpcklwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001999 [(set VR128:$dst,
2000 (v8i16 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002001 (bc_v8i16 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002002 UNPCKL_shuffle_mask)))]>;
2003 def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002004 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002005 "punpckldq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002006 [(set VR128:$dst,
2007 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2008 UNPCKL_shuffle_mask)))]>;
2009 def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002010 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002011 "punpckldq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002012 [(set VR128:$dst,
2013 (v4i32 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002014 (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002015 UNPCKL_shuffle_mask)))]>;
2016 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002017 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002018 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002019 [(set VR128:$dst,
2020 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2021 UNPCKL_shuffle_mask)))]>;
2022 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002023 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002024 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002025 [(set VR128:$dst,
2026 (v2i64 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002027 (memopv2i64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002028 UNPCKL_shuffle_mask)))]>;
2029
2030 def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002031 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002032 "punpckhbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002033 [(set VR128:$dst,
2034 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2035 UNPCKH_shuffle_mask)))]>;
2036 def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002037 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002038 "punpckhbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002039 [(set VR128:$dst,
2040 (v16i8 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002041 (bc_v16i8 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002042 UNPCKH_shuffle_mask)))]>;
2043 def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002044 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002045 "punpckhwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002046 [(set VR128:$dst,
2047 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2048 UNPCKH_shuffle_mask)))]>;
2049 def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002050 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002051 "punpckhwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002052 [(set VR128:$dst,
2053 (v8i16 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002054 (bc_v8i16 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002055 UNPCKH_shuffle_mask)))]>;
2056 def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002057 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002058 "punpckhdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002059 [(set VR128:$dst,
2060 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2061 UNPCKH_shuffle_mask)))]>;
2062 def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002063 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002064 "punpckhdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002065 [(set VR128:$dst,
2066 (v4i32 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002067 (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002068 UNPCKH_shuffle_mask)))]>;
2069 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002070 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002071 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002072 [(set VR128:$dst,
2073 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2074 UNPCKH_shuffle_mask)))]>;
2075 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002076 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002077 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002078 [(set VR128:$dst,
2079 (v2i64 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002080 (memopv2i64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002081 UNPCKH_shuffle_mask)))]>;
2082}
2083
2084// Extract / Insert
2085def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002086 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002087 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002088 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2089 (iPTR imm:$src2)))]>;
2090let isTwoAddress = 1 in {
2091 def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002092 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002093 GR32:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00002094 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002095 [(set VR128:$dst,
2096 (v8i16 (X86pinsrw (v8i16 VR128:$src1),
2097 GR32:$src2, (iPTR imm:$src3))))]>;
2098 def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002099 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002100 i16mem:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00002101 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002102 [(set VR128:$dst,
2103 (v8i16 (X86pinsrw (v8i16 VR128:$src1),
2104 (i32 (anyext (loadi16 addr:$src2))),
2105 (iPTR imm:$src3))))]>;
2106}
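// As a usage sketch (illustration only, not a pattern from this file):
// extracting element 3 of a v8i16 through X86pextrw is expected to select to
// "pextrw $3, %xmm0, %eax" in AT&T syntax, and PINSRWrri/PINSRWrmi perform the
// inverse insertion from a GR32 or from a 16-bit load.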
2107
2108// Mask creation
Evan Chengb783fa32007-07-19 01:14:50 +00002109def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002110 "pmovmskb\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002111 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2112
2113// Conditional store
Evan Cheng6e4d1d92007-09-11 19:55:27 +00002114let Uses = [EDI] in
Evan Chengb783fa32007-07-19 01:14:50 +00002115def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
Dan Gohman91888f02007-07-31 20:11:57 +00002116 "maskmovdqu\t{$mask, $src|$src, $mask}",
Evan Cheng6e4d1d92007-09-11 19:55:27 +00002117 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002118
2119// Non-temporal stores
Evan Chengb783fa32007-07-19 01:14:50 +00002120def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002121 "movntpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002122 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002123def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002124 "movntdq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002125 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002126def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002127 "movnti\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002128 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2129 TB, Requires<[HasSSE2]>;
2130
2131// Flush cache
Evan Chengb783fa32007-07-19 01:14:50 +00002132def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002133 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002134 TB, Requires<[HasSSE2]>;
2135
2136// Load, store, and memory fence
Evan Chengb783fa32007-07-19 01:14:50 +00002137def LFENCE : I<0xAE, MRM5m, (outs), (ins),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002138 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002139def MFENCE : I<0xAE, MRM6m, (outs), (ins),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002140 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
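// For reference: lfence orders earlier loads, and mfence orders earlier loads
// and stores, with respect to later memory operations; both share the 0F AE
// opcode and are distinguished only by the MRM5m/MRM6m ModRM extensions above.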
2141
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002142// Alias instruction that maps an all-ones vector to pcmpeqd for sse.
Chris Lattner17dab4a2008-01-10 05:45:39 +00002143let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00002144 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +00002145 "pcmpeqd\t$dst, $dst",
Chris Lattnere6aa3862007-11-25 00:24:49 +00002146 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002147
2148// FR64 to 128-bit vector conversion.
Evan Chengb783fa32007-07-19 01:14:50 +00002149def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002150 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002151 [(set VR128:$dst,
2152 (v2f64 (scalar_to_vector FR64:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002153def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002154 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002155 [(set VR128:$dst,
2156 (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;
2157
Evan Chengb783fa32007-07-19 01:14:50 +00002158def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002159 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002160 [(set VR128:$dst,
2161 (v4i32 (scalar_to_vector GR32:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002162def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002163 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002164 [(set VR128:$dst,
2165 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2166
Evan Chengb783fa32007-07-19 01:14:50 +00002167def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002168 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002169 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2170
Evan Chengb783fa32007-07-19 01:14:50 +00002171def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002172 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002173 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2174
2175// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00002176def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002177 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002178 [(set VR128:$dst,
2179 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
2180 Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002181def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002182 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002183 [(store (i64 (vector_extract (v2i64 VR128:$src),
2184 (iPTR 0))), addr:$dst)]>;
2185
2186// FIXME: may not be able to eliminate this movss with coalescing the src and
2187// dest register classes are different. We really want to write this pattern
2188// like this:
2189// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2190// (f32 FR32:$src)>;
Evan Chengb783fa32007-07-19 01:14:50 +00002191def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002192 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002193 [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
2194 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002195def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002196 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002197 [(store (f64 (vector_extract (v2f64 VR128:$src),
2198 (iPTR 0))), addr:$dst)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002199def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002200 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002201 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2202 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002203def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002204 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002205 [(store (i32 (vector_extract (v4i32 VR128:$src),
2206 (iPTR 0))), addr:$dst)]>;
2207
Evan Chengb783fa32007-07-19 01:14:50 +00002208def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002209 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002210 [(set GR32:$dst, (bitconvert FR32:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002211def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002212 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002213 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
2214
2215
2216// Move to lower bits of a VR128, leaving upper bits alone.
                                        2217// Three-operand (but two-address) aliases.
2218let isTwoAddress = 1 in {
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00002219 let neverHasSideEffects = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002220 def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002221 (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002222 "movsd\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002223
2224 let AddedComplexity = 15 in
2225 def MOVLPDrr : SDI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002226 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002227 "movsd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002228 [(set VR128:$dst,
2229 (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
2230 MOVL_shuffle_mask)))]>;
2231}
2232
                                        2233// Store / copy the lower 64 bits of an XMM register.
Evan Chengb783fa32007-07-19 01:14:50 +00002234def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002235 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002236 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
2237
                                        2238// Move to the lower bits of a VR128, zeroing the upper bits.
                                        2239// Loading from memory automatically zeroes the upper bits.
2240let AddedComplexity = 20 in
Evan Chengb783fa32007-07-19 01:14:50 +00002241 def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002242 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002243 [(set VR128:$dst,
Chris Lattnere6aa3862007-11-25 00:24:49 +00002244 (v2f64 (vector_shuffle immAllZerosV_bc,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002245 (v2f64 (scalar_to_vector
2246 (loadf64 addr:$src))),
2247 MOVL_shuffle_mask)))]>;
2248
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002249// movd / movq to XMM register zero-extends
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002250let AddedComplexity = 15 in {
Evan Chengb783fa32007-07-19 01:14:50 +00002251def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002252 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002253 [(set VR128:$dst,
2254 (v4i32 (vector_shuffle immAllZerosV,
2255 (v4i32 (scalar_to_vector GR32:$src)),
2256 MOVL_shuffle_mask)))]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002257// This is X86-64 only.
2258def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2259 "mov{d|q}\t{$src, $dst|$dst, $src}",
2260 [(set VR128:$dst,
2261 (v2i64 (vector_shuffle immAllZerosV_bc,
2262 (v2i64 (scalar_to_vector GR64:$src)),
2263 MOVL_shuffle_mask)))]>;
2264}
2265
2266let AddedComplexity = 20 in {
Evan Chengb783fa32007-07-19 01:14:50 +00002267def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002268 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002269 [(set VR128:$dst,
2270 (v4i32 (vector_shuffle immAllZerosV,
2271 (v4i32 (scalar_to_vector (loadi32 addr:$src))),
2272 MOVL_shuffle_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002273def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002274 "movq\t{$src, $dst|$dst, $src}",
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002275 [(set VR128:$dst,
2276 (v2i64 (vector_shuffle immAllZerosV_bc,
2277 (v2i64 (scalar_to_vector (loadi64 addr:$src))),
2278 MOVL_shuffle_mask)))]>, XS,
2279 Requires<[HasSSE2]>;
2280}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002281
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002282// Move from XMM to XMM, clearing the upper 64 bits. Note: the IA-32 manual is
                                        2283// misleading here; movq xmm1, xmm2 does clear the high bits.
2284let AddedComplexity = 15 in
2285def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2286 "movq\t{$src, $dst|$dst, $src}",
2287 [(set VR128:$dst, (v2i64 (vector_shuffle immAllZerosV_bc,
2288 VR128:$src,
2289 MOVL_shuffle_mask)))]>,
2290 XS, Requires<[HasSSE2]>;
2291
2292let AddedComplexity = 20 in
2293def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2294 "movq\t{$src, $dst|$dst, $src}",
2295 [(set VR128:$dst, (v2i64 (vector_shuffle immAllZerosV_bc,
2296 (memopv2i64 addr:$src),
2297 MOVL_shuffle_mask)))]>,
2298 XS, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002299
2300//===----------------------------------------------------------------------===//
2301// SSE3 Instructions
2302//===----------------------------------------------------------------------===//
2303
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002304// Move Instructions
Evan Chengb783fa32007-07-19 01:14:50 +00002305def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002306 "movshdup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002307 [(set VR128:$dst, (v4f32 (vector_shuffle
2308 VR128:$src, (undef),
2309 MOVSHDUP_shuffle_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002310def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002311 "movshdup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002312 [(set VR128:$dst, (v4f32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002313 (memopv4f32 addr:$src), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002314 MOVSHDUP_shuffle_mask)))]>;
2315
Evan Chengb783fa32007-07-19 01:14:50 +00002316def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002317 "movsldup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002318 [(set VR128:$dst, (v4f32 (vector_shuffle
2319 VR128:$src, (undef),
2320 MOVSLDUP_shuffle_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002321def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002322 "movsldup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002323 [(set VR128:$dst, (v4f32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002324 (memopv4f32 addr:$src), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002325 MOVSLDUP_shuffle_mask)))]>;
2326
Evan Chengb783fa32007-07-19 01:14:50 +00002327def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002328 "movddup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002329 [(set VR128:$dst, (v2f64 (vector_shuffle
2330 VR128:$src, (undef),
2331 SSE_splat_lo_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002332def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002333 "movddup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002334 [(set VR128:$dst,
2335 (v2f64 (vector_shuffle
2336 (scalar_to_vector (loadf64 addr:$src)),
2337 (undef),
2338 SSE_splat_lo_mask)))]>;
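// Both movddup forms above broadcast the low double-precision element into
// both halves of the destination; the SSE_splat_lo_mask shuffle mask used in
// the patterns presumably encodes that <0, 0> element selection.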
2339
2340// Arithmetic
2341let isTwoAddress = 1 in {
2342 def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002343 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002344 "addsubps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002345 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2346 VR128:$src2))]>;
2347 def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002348 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002349 "addsubps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002350 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2351 (load addr:$src2)))]>;
2352 def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002353 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002354 "addsubpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002355 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2356 VR128:$src2))]>;
2357 def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002358 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002359 "addsubpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002360 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2361 (load addr:$src2)))]>;
2362}
2363
Evan Chengb783fa32007-07-19 01:14:50 +00002364def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002365 "lddqu\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002366 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
2367
2368// Horizontal ops
2369class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002370 : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002371 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002372 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
2373class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002374 : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002375 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002376 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (load addr:$src2))))]>;
2377class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002378 : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002379 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002380 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
2381class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002382 : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002383 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002384 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (load addr:$src2))))]>;
2385
2386let isTwoAddress = 1 in {
2387 def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2388 def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2389 def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2390 def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2391 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2392 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2393 def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2394 def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2395}
2396
2397// Thread synchronization
Evan Chengb783fa32007-07-19 01:14:50 +00002398def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002399 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002400def MWAIT : I<0xC9, RawFrm, (outs), (ins), "mwait",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002401 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
2402
2403// vector_shuffle v1, <undef> <1, 1, 3, 3>
2404let AddedComplexity = 15 in
2405def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2406 MOVSHDUP_shuffle_mask)),
2407 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2408let AddedComplexity = 20 in
Dan Gohman4a4f1512007-07-18 20:23:34 +00002409def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002410 MOVSHDUP_shuffle_mask)),
2411 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
2412
2413// vector_shuffle v1, <undef> <0, 0, 2, 2>
2414let AddedComplexity = 15 in
2415 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2416 MOVSLDUP_shuffle_mask)),
2417 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2418let AddedComplexity = 20 in
Dan Gohman4a4f1512007-07-18 20:23:34 +00002419 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002420 MOVSLDUP_shuffle_mask)),
2421 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
2422
2423//===----------------------------------------------------------------------===//
2424// SSSE3 Instructions
2425//===----------------------------------------------------------------------===//
2426
Bill Wendling3b15d722007-08-11 09:52:53 +00002427// SSSE3 Instruction Templates:
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002428//
Bill Wendling98680292007-08-10 06:22:27 +00002429// SS38I - SSSE3 instructions with T8 prefix.
2430// SS3AI - SSSE3 instructions with TA prefix.
Bill Wendling3b15d722007-08-11 09:52:53 +00002431//
                                        2432// Note: SSSE3 instructions have 64-bit and 128-bit versions. The 64-bit
                                        2433// versions use the MMX registers. We put those instructions here because
                                        2434// they fit better into the SSSE3 instruction category than the MMX category.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002435
Evan Chengb783fa32007-07-19 01:14:50 +00002436class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
2437 list<dag> pattern>
Bill Wendling98680292007-08-10 06:22:27 +00002438 : I<o, F, outs, ins, asm, pattern>, T8, Requires<[HasSSSE3]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002439class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
2440 list<dag> pattern>
Bill Wendling98680292007-08-10 06:22:27 +00002441 : I<o, F, outs, ins, asm, pattern>, TA, Requires<[HasSSSE3]>;
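// For example, the T8 and TA modifiers are expected to select the 0f 38 and
// 0f 3a opcode maps, so an SS3AI definition with opcode 0x0F (palignr below)
// encodes as 66 0f 3a 0f in its 128-bit, OpSize form.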
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002442
Bill Wendling98680292007-08-10 06:22:27 +00002443/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002444let isTwoAddress = 1 in {
Bill Wendling98680292007-08-10 06:22:27 +00002445 multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
2446 Intrinsic IntId64, Intrinsic IntId128,
2447 bit Commutable = 0> {
2448 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
2449 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2450 [(set VR64:$dst, (IntId64 VR64:$src))]> {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002451 let isCommutable = Commutable;
2452 }
Bill Wendling98680292007-08-10 06:22:27 +00002453 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
2454 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2455 [(set VR64:$dst,
2456 (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;
2457
2458 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2459 (ins VR128:$src),
2460 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2461 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2462 OpSize {
2463 let isCommutable = Commutable;
2464 }
2465 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2466 (ins i128mem:$src),
2467 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2468 [(set VR128:$dst,
2469 (IntId128
2470 (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002471 }
2472}
2473
Bill Wendling98680292007-08-10 06:22:27 +00002474/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
2475let isTwoAddress = 1 in {
2476 multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
2477 Intrinsic IntId64, Intrinsic IntId128,
2478 bit Commutable = 0> {
2479 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2480 (ins VR64:$src),
2481 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2482 [(set VR64:$dst, (IntId64 VR64:$src))]> {
2483 let isCommutable = Commutable;
2484 }
2485 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2486 (ins i64mem:$src),
2487 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2488 [(set VR64:$dst,
2489 (IntId64
2490 (bitconvert (memopv4i16 addr:$src))))]>;
2491
2492 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2493 (ins VR128:$src),
2494 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2495 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2496 OpSize {
2497 let isCommutable = Commutable;
2498 }
2499 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2500 (ins i128mem:$src),
2501 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2502 [(set VR128:$dst,
2503 (IntId128
2504 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
2505 }
2506}
2507
2508/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
2509let isTwoAddress = 1 in {
2510 multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
2511 Intrinsic IntId64, Intrinsic IntId128,
2512 bit Commutable = 0> {
2513 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2514 (ins VR64:$src),
2515 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2516 [(set VR64:$dst, (IntId64 VR64:$src))]> {
2517 let isCommutable = Commutable;
2518 }
2519 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2520 (ins i64mem:$src),
2521 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2522 [(set VR64:$dst,
2523 (IntId64
2524 (bitconvert (memopv2i32 addr:$src))))]>;
2525
2526 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2527 (ins VR128:$src),
2528 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2529 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2530 OpSize {
2531 let isCommutable = Commutable;
2532 }
2533 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2534 (ins i128mem:$src),
2535 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2536 [(set VR128:$dst,
2537 (IntId128
2538 (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
2539 }
2540}
2541
2542defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
2543 int_x86_ssse3_pabs_b,
2544 int_x86_ssse3_pabs_b_128>;
2545defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
2546 int_x86_ssse3_pabs_w,
2547 int_x86_ssse3_pabs_w_128>;
2548defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
2549 int_x86_ssse3_pabs_d,
2550 int_x86_ssse3_pabs_d_128>;
2551
2552/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
2553let isTwoAddress = 1 in {
2554 multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
2555 Intrinsic IntId64, Intrinsic IntId128,
2556 bit Commutable = 0> {
2557 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2558 (ins VR64:$src1, VR64:$src2),
2559 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2560 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2561 let isCommutable = Commutable;
2562 }
2563 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2564 (ins VR64:$src1, i64mem:$src2),
2565 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2566 [(set VR64:$dst,
2567 (IntId64 VR64:$src1,
2568 (bitconvert (memopv8i8 addr:$src2))))]>;
2569
2570 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2571 (ins VR128:$src1, VR128:$src2),
2572 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2573 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2574 OpSize {
2575 let isCommutable = Commutable;
2576 }
2577 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2578 (ins VR128:$src1, i128mem:$src2),
2579 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2580 [(set VR128:$dst,
2581 (IntId128 VR128:$src1,
2582 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
2583 }
2584}
2585
2586/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
2587let isTwoAddress = 1 in {
2588 multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
2589 Intrinsic IntId64, Intrinsic IntId128,
2590 bit Commutable = 0> {
2591 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2592 (ins VR64:$src1, VR64:$src2),
2593 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2594 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2595 let isCommutable = Commutable;
2596 }
2597 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2598 (ins VR64:$src1, i64mem:$src2),
2599 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2600 [(set VR64:$dst,
2601 (IntId64 VR64:$src1,
2602 (bitconvert (memopv4i16 addr:$src2))))]>;
2603
2604 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2605 (ins VR128:$src1, VR128:$src2),
2606 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2607 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2608 OpSize {
2609 let isCommutable = Commutable;
2610 }
2611 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2612 (ins VR128:$src1, i128mem:$src2),
2613 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2614 [(set VR128:$dst,
2615 (IntId128 VR128:$src1,
2616 (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
2617 }
2618}
2619
2620/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
2621let isTwoAddress = 1 in {
2622 multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
2623 Intrinsic IntId64, Intrinsic IntId128,
2624 bit Commutable = 0> {
2625 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2626 (ins VR64:$src1, VR64:$src2),
2627 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2628 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2629 let isCommutable = Commutable;
2630 }
2631 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2632 (ins VR64:$src1, i64mem:$src2),
2633 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2634 [(set VR64:$dst,
2635 (IntId64 VR64:$src1,
2636 (bitconvert (memopv2i32 addr:$src2))))]>;
2637
2638 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2639 (ins VR128:$src1, VR128:$src2),
2640 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2641 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2642 OpSize {
2643 let isCommutable = Commutable;
2644 }
2645 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2646 (ins VR128:$src1, i128mem:$src2),
2647 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2648 [(set VR128:$dst,
2649 (IntId128 VR128:$src1,
2650 (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
2651 }
2652}
2653
2654defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw",
2655 int_x86_ssse3_phadd_w,
2656 int_x86_ssse3_phadd_w_128, 1>;
2657defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd",
2658 int_x86_ssse3_phadd_d,
2659 int_x86_ssse3_phadd_d_128, 1>;
2660defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw",
2661 int_x86_ssse3_phadd_sw,
2662 int_x86_ssse3_phadd_sw_128, 1>;
2663defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw",
2664 int_x86_ssse3_phsub_w,
2665 int_x86_ssse3_phsub_w_128>;
2666defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd",
2667 int_x86_ssse3_phsub_d,
2668 int_x86_ssse3_phsub_d_128>;
2669defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw",
2670 int_x86_ssse3_phsub_sw,
2671 int_x86_ssse3_phsub_sw_128>;
2672defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
2673 int_x86_ssse3_pmadd_ub_sw,
2674 int_x86_ssse3_pmadd_ub_sw_128, 1>;
2675defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
2676 int_x86_ssse3_pmul_hr_sw,
2677 int_x86_ssse3_pmul_hr_sw_128, 1>;
2678defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb",
2679 int_x86_ssse3_pshuf_b,
2680 int_x86_ssse3_pshuf_b_128>;
2681defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb",
2682 int_x86_ssse3_psign_b,
2683 int_x86_ssse3_psign_b_128>;
2684defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw",
2685 int_x86_ssse3_psign_w,
2686 int_x86_ssse3_psign_w_128>;
                                        2687defm PSIGND : SS3I_binop_rm_int_32<0x0A, "psignd",
2688 int_x86_ssse3_psign_d,
2689 int_x86_ssse3_psign_d_128>;
2690
2691let isTwoAddress = 1 in {
Bill Wendling1dc817c2007-08-10 09:00:17 +00002692 def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
2693 (ins VR64:$src1, VR64:$src2, i16imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002694 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002695 [(set VR64:$dst,
2696 (int_x86_ssse3_palign_r
2697 VR64:$src1, VR64:$src2,
2698 imm:$src3))]>;
                                        2699 def PALIGNR64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
2700 (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002701 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002702 [(set VR64:$dst,
2703 (int_x86_ssse3_palign_r
2704 VR64:$src1,
2705 (bitconvert (memopv2i32 addr:$src2)),
2706 imm:$src3))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002707
Bill Wendling1dc817c2007-08-10 09:00:17 +00002708 def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
2709 (ins VR128:$src1, VR128:$src2, i32imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002710 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002711 [(set VR128:$dst,
2712 (int_x86_ssse3_palign_r_128
2713 VR128:$src1, VR128:$src2,
2714 imm:$src3))]>, OpSize;
                                        2715 def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
2716 (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002717 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002718 [(set VR128:$dst,
2719 (int_x86_ssse3_palign_r_128
2720 VR128:$src1,
2721 (bitconvert (memopv4i32 addr:$src2)),
2722 imm:$src3))]>, OpSize;
Bill Wendling98680292007-08-10 06:22:27 +00002723}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002724
2725//===----------------------------------------------------------------------===//
2726// Non-Instruction Patterns
2727//===----------------------------------------------------------------------===//
2728
2729// 128-bit vector undef's.
Bill Wendling1dc817c2007-08-10 09:00:17 +00002730def : Pat<(v4f32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002731def : Pat<(v2f64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2732def : Pat<(v16i8 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2733def : Pat<(v8i16 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2734def : Pat<(v4i32 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2735def : Pat<(v2i64 (undef)), (IMPLICIT_DEF_VR128)>, Requires<[HasSSE2]>;
2736
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002737// Scalar to v8i16 / v16i8. The source may be a GR32, but only the lower 8 or
                                        2738// 16 bits matter.
2739def : Pat<(v8i16 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
2740 Requires<[HasSSE2]>;
2741def : Pat<(v16i8 (X86s2vec GR32:$src)), (MOVDI2PDIrr GR32:$src)>,
2742 Requires<[HasSSE2]>;
2743
2744// bit_convert
2745let Predicates = [HasSSE2] in {
2746 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
2747 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
2748 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
2749 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
2750 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
2751 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
2752 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
2753 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
2754 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
2755 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
2756 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
2757 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
2758 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
2759 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
2760 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
2761 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
2762 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
2763 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
2764 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
2765 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
2766 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
2767 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
2768 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
2769 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
2770 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
2771 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
2772 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
2773 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
2774 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
2775 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
2776}
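// These patterns make bitconvert between any two 128-bit vector types a no-op:
// the same VR128 register is reused, so the cast costs no instructions.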
2777
                                        2778// Move a scalar to XMM, zero-extended: movd to an XMM register zeroes the
                                        2779// remaining vector elements.
2780let AddedComplexity = 15 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002781// Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
Chris Lattnere6aa3862007-11-25 00:24:49 +00002782def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002783 (v2f64 (scalar_to_vector FR64:$src)), MOVL_shuffle_mask)),
2784 (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
Chris Lattnere6aa3862007-11-25 00:24:49 +00002785def : Pat<(v4f32 (vector_shuffle immAllZerosV_bc,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002786 (v4f32 (scalar_to_vector FR32:$src)), MOVL_shuffle_mask)),
2787 (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE2]>;
2788}
2789
2790// Splat v2f64 / v2i64
2791let AddedComplexity = 10 in {
2792def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
2793 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2794def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
2795 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2796def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
2797 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2798def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
2799 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2800}
2801
2802// Splat v4f32
2803def : Pat<(vector_shuffle (v4f32 VR128:$src), (undef), SSE_splat_mask:$sm),
2804 (SHUFPSrri VR128:$src, VR128:$src, SSE_splat_mask:$sm)>,
2805 Requires<[HasSSE1]>;
2806
2807// Special unary SHUFPSrri case.
                                        2808// FIXME: when we want non-two-address code, should we use PSHUFD instead?
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002809def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
2810 SHUFP_unary_shuffle_mask:$sm)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002811 (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2812 Requires<[HasSSE1]>;
// Special unary SHUFPDrri case.
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (undef),
                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(vector_shuffle (memopv4f32 addr:$src1), (undef),
                  SHUFP_unary_shuffle_mask:$sm),
          (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (v4i32 VR128:$src2),
                  PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v2i64 shuffle cases using SHUFPDrri.
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                  SHUFP_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special unary SHUFPDrri case (v2i64).
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (undef),
                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;

// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
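// With both operands the same register, "unpcklps %xmm0, %xmm0" interleaves a
// vector with itself: <x0, x1, x2, x3> becomes <x0, x0, x1, x1>, matching the
// <0, 0, 1, 1> mask above (likewise for the byte / word / dword forms).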

// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (UNPCKHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
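// Similarly, "unpckhps %xmm0, %xmm0" turns <x0, x1, x2, x3> into
// <x2, x2, x3, x3>, matching the <2, 2, 3, 3> mask.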

let AddedComplexity = 15 in {
// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHP_shuffle_mask)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHLPS_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}
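// MOVLHPS copies the low two elements of the second operand into the high half
// of the destination (<a0, a1, b0, b1>); MOVHLPS does the reverse, copying the
// high half of the second operand into the low half (<b2, b3, a2, a3>).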

let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)),
                  MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)),
                  MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
}
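// MOVLPS / MOVHPS (and the PD variants) merge a 64-bit memory operand into the
// low or high half of the register, so the masks above allow a load of the
// second operand to be folded directly into the shuffle.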

let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
}

// Set lowest element and zero upper elements.
let AddedComplexity = 15 in
def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc, VR128:$src,
                  MOVL_shuffle_mask)),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
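// MOVZPQILo2PQI is the register form of movq, which copies the low quadword
// and zeroes the upper quadword of the destination.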


// FIXME: Temporary workaround since 2-wide shuffle is broken.
def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
          (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
          (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
          (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
          (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
          (PUNPCKLQDQrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

// Some special case pandn patterns.
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
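// The DAG has no 'not' node; a bitwise not is canonicalized to an xor with
// all-ones, so (and (xor x, -1), y) is exactly what pandn computes: ~x & y.
// The variants above only differ in which vector type the all-ones constant
// is bitcast from.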

// vector -> vector casts
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
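// cvtdq2ps converts four signed 32-bit integers to floats; cvttps2dq goes the
// other way with truncation, matching fp_to_sint's round-toward-zero semantics.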

// Use movaps / movups for SSE integer load / store (one byte shorter).
def : Pat<(alignedloadv4i32 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(loadv4i32 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(alignedloadv2i64 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
def : Pat<(loadv2i64 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;

def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v8i16 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v16i8 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
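// The saving comes from the encoding: movdqa / movdqu carry the 0x66 / 0xF3
// prefix that movaps / movups do not, so the FP moves are one byte shorter
// while moving the same 128 bits.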