//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad]>;
def X86vshl    : SDNode<"X86ISD::VSHL",      SDTIntShiftOp>;
def X86vshr    : SDNode<"X86ISD::VSRL",      SDTIntShiftOp>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements.  These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
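// For illustration: the scalar intrinsic forms later in this file fold loads
// through these complex patterns, roughly as in
//   [(set VR128:$dst, (F32Int VR128:$src1, sse_load_f32:$src2))]
// so that either a plain f32 load or a whole-vector load whose upper elements
// are ignored can be folded into the 'ss' instruction (a sketch of the intent,
// see the SSrm_Int definitions below).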

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;
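// For illustration: these fragments are what let the patterns below prefer an
// aligned move over an unaligned one, e.g. MOVAPSrm matches
// (alignedloadv4f32 addr:$src) while MOVUPSrm matches the plain
// (loadv4f32 addr:$src) form.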

def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.
// FIXME: Actually implement support for targets that don't require the
// alignment.  This probably wants a subtarget predicate.
def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions.  They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 8;
  return false;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def PSxLDQ_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getValue() >> 3);
}]>;
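// For illustration (a sketch of the intent): PSLLDQ/PSRLDQ shift by bytes, so
// a shift amount that reaches this xform expressed in bits is scaled down
// here; e.g. an incoming immediate of 32 becomes getI32Imm(4).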

def SSE_CC_imm : SDNodeXForm<cond, [{
  unsigned Val;
  switch (N->get()) {
  default: Val = 0; assert(0 && "Unexpected CondCode"); break;
  case ISD::SETOEQ: Val = 0; break;
  case ISD::SETOLT: Val = 1; break;
  case ISD::SETOLE: Val = 2; break;
  case ISD::SETUO:  Val = 3; break;
  case ISD::SETONE: Val = 4; break;
  case ISD::SETOGE: Val = 5; break;
  case ISD::SETOGT: Val = 6; break;
  case ISD::SETO:   Val = 7; break;
  }
  return getI8Imm(Val);
}]>;
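// For illustration: together with the vsetcc patterns near CMPPSrri below,
// this xform is what turns e.g.
//   (vsetcc (v4f32 VR128:$a), VR128:$b, SETOLT)
// into CMPPS with immediate 1 (the cmpltps encoding); SETOEQ selects 0
// (cmpeqps), SETUO selects 3 (cmpunordps), and so on down the table above.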

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
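// For illustration (a sketch of the encoding): the four 2-bit element indices
// of the mask are packed into one byte, low result element in the low bits,
// e.g. the reversing mask <3,2,1,0> becomes 3 | (2<<2) | (1<<4) | (0<<6) = 0x1B.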

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

def SSE_splat_mask : PatLeaf<(build_vector), [{
  return X86::isSplatMask(N);
}], SHUFFLE_get_shuf_imm>;

def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
  return X86::isSplatLoMask(N);
}]>;

def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;

def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPS_v_undef_Mask(N);
}]>;

def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHPMask(N);
}]>;

def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLPMask(N);
}]>;

def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSHDUPMask(N);
}]>;

def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSLDUPMask(N);
}]>;

def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;

def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFHWMask(N);
}], SHUFFLE_get_pshufhw_imm>;

def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFLWMask(N);
}], SHUFFLE_get_pshuflw_imm>;

def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;


//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation.  These pseudos are
// expanded by the scheduler into a branch sequence.
let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}
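// For illustration (a sketch of the intent, not of the inserter code): the
// custom inserter replaces each CMOV_* pseudo with a small diamond of basic
// blocks -- a conditional branch keyed on $cond/EFLAGS that skips one side,
// and a PHI in the join block that merges $t and $f into $dst.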

//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTSI2SSrr  : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm  : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "cvtsi2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si
                                           (load addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi
                                           (load addr:$src)))]>;
def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi
                                           (load addr:$src)))]>;
let Constraints = "$src1 = $dst" in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              VR64:$src2))]>;
  def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              (load addr:$src2)))]>;
}

// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si (load addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              (loadi32 addr:$src2)))]>;
}

// Comparison instructions
let Constraints = "$src1 = $dst" in {
let neverHasSideEffects = 1 in
  def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
let neverHasSideEffects = 1, mayLoad = 1 in
  def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases to match intrinsics which expect XMM operand(s).
let Constraints = "$src1 = $dst" in {
  def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                         VR128:$src, imm:$cc))]>;
  def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                         (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
                       (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
                        (implicit EFLAGS)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
                       (ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
                      (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), VR128:$src2),
                       (implicit EFLAGS)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
                      (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases of packed SSE1 instructions for scalar use. These all have names
// that start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
               Requires<[HasSSE1]>, TB, OpSize;

// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
let neverHasSideEffects = 1 in
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
let isSimpleLoad = 1 in
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
  def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
  def FsORPSrr  : PSI<0x56, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
  def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst),
                      (ins FR32:$src1, FR32:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}

def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fand FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsORPSrm  : PSI<0x56, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86for FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst),
                    (ins FR32:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fxor FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
let neverHasSideEffects = 1 in {
def FsANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;

let mayLoad = 1 in
def FsANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;
}
}

/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation.  This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F32Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
                 (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
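// For illustration: each defm above expands the multiclass into its six
// "instructions" with the defm name prepended; e.g. 'defm ADD' produces
// ADDSSrr, ADDSSrm, ADDPSrr, ADDPSrm, ADDSSrr_Int and ADDSSrm_Int, covering
// addss/addps in reg+reg and reg+mem forms plus the intrinsic variants.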

/// sse1_fp_binop_rm - Other SSE1 binops
///
/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form.  Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            bit Commutable = 0> {

  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
                 (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, (memopv4f32 addr:$src2)))]>;
}
}

defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse_max_ss, int_x86_sse_max_ps>;
defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse_min_ss, int_x86_sse_min_ps>;

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions

// Move Instructions
let neverHasSideEffects = 1 in
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;

let neverHasSideEffects = 1 in
def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1 in
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS load and store
let isSimpleLoad = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;

let Constraints = "$src1 = $dst" in {
  let AddedComplexity = 20 in {
    def MOVLPSrm : PSI<0x12, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movlps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1,
                                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                                 MOVLP_shuffle_mask)))]>;
    def MOVHPSrm : PSI<0x16, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movhps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1,
                                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                                 MOVHP_shuffle_mask)))]>;
  } // AddedComplexity
} // Constraints = "$src1 = $dst"


def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle
                                         (bc_v2f64 (v4f32 VR128:$src)), (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let AddedComplexity = 15 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movlhps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHP_shuffle_mask)))]>;

def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movhlps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHLPS_shuffle_mask)))]>;
} // AddedComplexity
} // Constraints = "$src1 = $dst"



// Arithmetic

/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation.  This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode,
                           Intrinsic F32Int,
                           Intrinsic V4F32Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;

  // Vector intrinsic operation, reg
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
}

// Square root.
defm SQRT  : sse1_fp_unop_rm<0x51, "sqrt",  fsqrt,
                             int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;

// Reciprocal approximations.  Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
                             int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
defm RCP   : sse1_fp_unop_rm<0x53, "rcp",   X86frcp,
                             int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
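// For illustration (standard numerics, not code from this file): rcpps and
// rsqrtps return roughly 12-bit-accurate approximations, and one
// Newton-Raphson step recovers close to single precision, e.g. for the
// reciprocal of a:
//   x1 = x0 * (2 - a * x0)
// and for the reciprocal square root of a:
//   x1 = 0.5 * x0 * (3 - a * x0 * x0)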
835
836// Logical
Evan Cheng3ea4d672008-03-05 08:19:16 +0000837let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000838 let isCommutable = 1 in {
839 def ANDPSrr : PSI<0x54, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000840 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000841 "andps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000842 [(set VR128:$dst, (v2i64
843 (and VR128:$src1, VR128:$src2)))]>;
844 def ORPSrr : PSI<0x56, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000845 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000846 "orps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000847 [(set VR128:$dst, (v2i64
848 (or VR128:$src1, VR128:$src2)))]>;
849 def XORPSrr : PSI<0x57, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000850 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000851 "xorps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000852 [(set VR128:$dst, (v2i64
853 (xor VR128:$src1, VR128:$src2)))]>;
854 }
855
856 def ANDPSrm : PSI<0x54, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000857 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000858 "andps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000859 [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
860 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000861 def ORPSrm : PSI<0x56, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000862 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000863 "orps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000864 [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
865 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000866 def XORPSrm : PSI<0x57, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000867 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000868 "xorps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000869 [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
870 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000871 def ANDNPSrr : PSI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000872 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000873 "andnps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000874 [(set VR128:$dst,
875 (v2i64 (and (xor VR128:$src1,
876 (bc_v2i64 (v4i32 immAllOnesV))),
877 VR128:$src2)))]>;
878 def ANDNPSrm : PSI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000879 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000880 "andnps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000881 [(set VR128:$dst,
Evan Cheng8e92cd12007-07-19 23:34:10 +0000882 (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000883 (bc_v2i64 (v4i32 immAllOnesV))),
Evan Cheng8e92cd12007-07-19 23:34:10 +0000884 (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000885}
886
Evan Cheng3ea4d672008-03-05 08:19:16 +0000887let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000888 def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
Nate Begeman061db5f2008-05-12 20:34:32 +0000889 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
890 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
891 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
892 VR128:$src, imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000893 def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
Nate Begeman061db5f2008-05-12 20:34:32 +0000894 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
895 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
896 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
Evan Cheng00b66ef2008-05-23 00:37:07 +0000897 (memop addr:$src), imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000898}
Nate Begeman061db5f2008-05-12 20:34:32 +0000899def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), VR128:$src2, cond:$cc)),
900 (CMPPSrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
901def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), (memop addr:$src2), cond:$cc)),
902 (CMPPSrmi VR128:$src1, addr:$src2, (SSE_CC_imm cond:$cc))>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000903
904// Shuffle and unpack instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +0000905let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000906 let isConvertibleToThreeAddress = 1 in // Convert to pshufd
907 def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000908 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000909 VR128:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +0000910 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000911 [(set VR128:$dst,
912 (v4f32 (vector_shuffle
913 VR128:$src1, VR128:$src2,
914 SHUFP_shuffle_mask:$src3)))]>;
915 def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000916 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000917 f128mem:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +0000918 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000919 [(set VR128:$dst,
920 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000921 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000922 SHUFP_shuffle_mask:$src3)))]>;
923
924 let AddedComplexity = 10 in {
925 def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000926 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000927 "unpckhps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000928 [(set VR128:$dst,
929 (v4f32 (vector_shuffle
930 VR128:$src1, VR128:$src2,
931 UNPCKH_shuffle_mask)))]>;
932 def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000933 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000934 "unpckhps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000935 [(set VR128:$dst,
936 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000937 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000938 UNPCKH_shuffle_mask)))]>;
939
940 def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000941 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000942 "unpcklps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000943 [(set VR128:$dst,
944 (v4f32 (vector_shuffle
945 VR128:$src1, VR128:$src2,
946 UNPCKL_shuffle_mask)))]>;
947 def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000948 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000949 "unpcklps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000950 [(set VR128:$dst,
951 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000952 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000953 UNPCKL_shuffle_mask)))]>;
954 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +0000955} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000956
957// Mask creation
Evan Chengb783fa32007-07-19 01:14:50 +0000958def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000959 "movmskps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000960 [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000961def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000962 "movmskpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000963 [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
964
Evan Chengd1d68072008-03-08 00:58:38 +0000965// Prefetch intrinsic.
966def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
967 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
968def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
969 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
970def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
971 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
972def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
973 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
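// Illustrative only (a hedged sketch, not part of this file's defs): assuming
// the usual llvm.prefetch(address, rw, locality) intrinsic, the third operand
// is what the patterns above key on, e.g.
//   call void @llvm.prefetch(i8* %p, i32 0, i32 3)  ; locality 3 -> PREFETCHT0
//   call void @llvm.prefetch(i8* %p, i32 0, i32 0)  ; locality 0 -> PREFETCHNTA
// The second (read/write) operand is matched by the bare 'imm' and ignored here.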
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000974
975// Non-temporal stores
Evan Chengb783fa32007-07-19 01:14:50 +0000976def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000977 "movntps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000978 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
979
980// Load, store, and memory fence
Evan Chengb783fa32007-07-19 01:14:50 +0000981def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000982
983// MXCSR register
Evan Chengb783fa32007-07-19 01:14:50 +0000984def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000985 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000986def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
Dan Gohman91888f02007-07-31 20:11:57 +0000987 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000988
989// Alias instructions that map zero vector to pxor / xorp* for sse.
Chris Lattner17dab4a2008-01-10 05:45:39 +0000990let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +0000991def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +0000992 "xorps\t$dst, $dst",
Chris Lattnere6aa3862007-11-25 00:24:49 +0000993 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000994
Evan Chenga15896e2008-03-12 07:02:50 +0000995let Predicates = [HasSSE1] in {
996 def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
997 def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
998 def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
999 def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
1000 def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
1001}
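// Illustrative only (hedged): with V_SET0 marked rematerializable and the
// patterns above, an all-zeros build_vector of any of these types (v4f32,
// v4i32, v2i64, v8i16, v16i8, v2f64) selects a single "xorps $dst, $dst"
// instead of a constant-pool load, and can be cheaply rematerialized by the
// register allocator wherever a zero vector is needed.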
1002
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001003// FR32 to 128-bit vector conversion.
Evan Chengb783fa32007-07-19 01:14:50 +00001004def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001005 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001006 [(set VR128:$dst,
1007 (v4f32 (scalar_to_vector FR32:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001008def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001009 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001010 [(set VR128:$dst,
1011 (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;
1012
1013// FIXME: coalescing may not be able to eliminate this movss, since the src
1014// and dest register classes are different. We really want to write this
1015// pattern like this:
1016// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
1017// (f32 FR32:$src)>;
Evan Chengb783fa32007-07-19 01:14:50 +00001018def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001019 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001020 [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
1021 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001022def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001023 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001024 [(store (f32 (vector_extract (v4f32 VR128:$src),
1025 (iPTR 0))), addr:$dst)]>;
1026
1027
1028// Move to lower bits of a VR128, leaving upper bits alone.
1029// Three operand (but two address) aliases.
Evan Cheng3ea4d672008-03-05 08:19:16 +00001030let Constraints = "$src1 = $dst" in {
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001031let neverHasSideEffects = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001032 def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001033 (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001034 "movss\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001035
1036 let AddedComplexity = 15 in
1037 def MOVLPSrr : SSI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001038 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001039 "movss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001040 [(set VR128:$dst,
1041 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
1042 MOVL_shuffle_mask)))]>;
1043}
1044
1045// Move to the lower bits of a VR128, zeroing the upper bits.
1046// Loading from memory automatically zeroes the upper bits.
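// Illustrative only (hedged sketch): the X86vzmovl-of-scalar_to_vector pattern
// below is the form a zero-extending scalar load takes after lowering,
// conceptually
//   %f = load float* %p
//   %v = <4 x float> < %f, 0.0, 0.0, 0.0 >
// which selects a single "movss (%p), $dst"; the load form of movss already
// clears the upper three lanes, so no extra instructions are needed.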
1047let AddedComplexity = 20 in
Evan Chengb783fa32007-07-19 01:14:50 +00001048def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001049 "movss\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00001050 [(set VR128:$dst, (v4f32 (X86vzmovl (v4f32 (scalar_to_vector
Evan Cheng40ee6e52008-05-08 00:57:18 +00001051 (loadf32 addr:$src))))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001052
Evan Cheng056afe12008-05-20 18:24:47 +00001053def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
Evan Cheng40ee6e52008-05-08 00:57:18 +00001054 (MOVZSS2PSrm addr:$src)>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001055
1056//===----------------------------------------------------------------------===//
1057// SSE2 Instructions
1058//===----------------------------------------------------------------------===//
1059
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001060// Move Instructions
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001061let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001062def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001063 "movsd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001064let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001065def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001066 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001067 [(set FR64:$dst, (loadf64 addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001068def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001069 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001070 [(store FR64:$src, addr:$dst)]>;
1071
1072// Conversion instructions
Evan Chengb783fa32007-07-19 01:14:50 +00001073def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001074 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001075 [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001076def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001077 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001078 [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001079def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001080 "cvtsd2ss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001081 [(set FR32:$dst, (fround FR64:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001082def CVTSD2SSrm : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001083 "cvtsd2ss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001084 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001085def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001086 "cvtsi2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001087 [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001088def CVTSI2SDrm : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001089 "cvtsi2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001090 [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
1091
1092// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001093def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001094 "cvtss2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001095 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1096 Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001097def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001098 "cvtss2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001099 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1100 Requires<[HasSSE2]>;
1101
1102// Match intrinsics which expect XMM operand(s).
Evan Chengb783fa32007-07-19 01:14:50 +00001103def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001104 "cvtsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001105 [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001106def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001107 "cvtsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001108 [(set GR32:$dst, (int_x86_sse2_cvtsd2si
1109 (load addr:$src)))]>;
1110
Dale Johannesen1fbb4a52007-10-30 22:15:38 +00001111// Match intrinsics which expect MM and XMM operand(s).
1112def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1113 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1114 [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
1115def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1116 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1117 [(set VR64:$dst, (int_x86_sse_cvtpd2pi
Evan Cheng00b66ef2008-05-23 00:37:07 +00001118 (memop addr:$src)))]>;
Dale Johannesen1fbb4a52007-10-30 22:15:38 +00001119def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1120 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1121 [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
1122def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1123 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1124 [(set VR64:$dst, (int_x86_sse_cvttpd2pi
Evan Cheng00b66ef2008-05-23 00:37:07 +00001125 (memop addr:$src)))]>;
Dale Johannesen1fbb4a52007-10-30 22:15:38 +00001126def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
1127 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1128 [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
1129def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1130 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1131 [(set VR128:$dst, (int_x86_sse_cvtpi2pd
1132 (load addr:$src)))]>;
1133
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001134// Aliases for intrinsics
Evan Chengb783fa32007-07-19 01:14:50 +00001135def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001136 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001137 [(set GR32:$dst,
1138 (int_x86_sse2_cvttsd2si VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001139def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001140 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001141 [(set GR32:$dst, (int_x86_sse2_cvttsd2si
1142 (load addr:$src)))]>;
1143
1144// Comparison instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +00001145let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
Evan Cheng653c7ac2007-12-20 19:57:09 +00001146 def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001147 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001148 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001149let mayLoad = 1 in
Evan Cheng653c7ac2007-12-20 19:57:09 +00001150 def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001151 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001152 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001153}
1154
Evan Cheng950aac02007-09-25 01:57:46 +00001155let Defs = [EFLAGS] in {
Evan Chengb783fa32007-07-19 01:14:50 +00001156def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001157 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001158 [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001159def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001160 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001161 [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
Evan Cheng950aac02007-09-25 01:57:46 +00001162 (implicit EFLAGS)]>;
1163}
1164
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001165// Aliases to match intrinsics which expect XMM operand(s).
Evan Cheng3ea4d672008-03-05 08:19:16 +00001166let Constraints = "$src1 = $dst" in {
Evan Cheng653c7ac2007-12-20 19:57:09 +00001167 def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001168 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001169 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001170 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1171 VR128:$src, imm:$cc))]>;
Evan Cheng653c7ac2007-12-20 19:57:09 +00001172 def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001173 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001174 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001175 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1176 (load addr:$src), imm:$cc))]>;
1177}
1178
Evan Cheng950aac02007-09-25 01:57:46 +00001179let Defs = [EFLAGS] in {
Evan Chengb783fa32007-07-19 01:14:50 +00001180def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001181 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001182 [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1183 (implicit EFLAGS)]>;
1184def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001185 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001186 [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
1187 (implicit EFLAGS)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001188
Evan Chengb783fa32007-07-19 01:14:50 +00001189def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001190 "comisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001191 [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1192 (implicit EFLAGS)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001193def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001194 "comisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001195 [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
Evan Cheng950aac02007-09-25 01:57:46 +00001196 (implicit EFLAGS)]>;
1197} // Defs = [EFLAGS]
1198
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001199// Aliases of packed SSE2 instructions for scalar use. These all have names that
1200// start with 'Fs'.
1201
1202// Alias instructions that map fld0 to pxor for sse.
Chris Lattner17dab4a2008-01-10 05:45:39 +00001203let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001204def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +00001205 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001206 Requires<[HasSSE2]>, TB, OpSize;
1207
1208// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
1209// disregarded.
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001210let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001211def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001212 "movapd\t{$src, $dst|$dst, $src}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001213
1214// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
1215// disregarded.
Chris Lattner1a1932c2008-01-06 23:38:27 +00001216let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001217def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001218 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman11821702007-07-27 17:16:43 +00001219 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001220
1221// Alias bitwise logical operations using SSE logical ops on packed FP values.
Evan Cheng3ea4d672008-03-05 08:19:16 +00001222let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001223let isCommutable = 1 in {
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001224 def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
1225 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001226 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001227 [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001228 def FsORPDrr : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
1229 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001230 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001231 [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001232 def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
1233 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001234 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001235 [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
1236}
1237
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001238def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
1239 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001240 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001241 [(set FR64:$dst, (X86fand FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001242 (memopfsf64 addr:$src2)))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001243def FsORPDrm : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
1244 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001245 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001246 [(set FR64:$dst, (X86for FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001247 (memopfsf64 addr:$src2)))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001248def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
1249 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001250 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001251 [(set FR64:$dst, (X86fxor FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001252 (memopfsf64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001253
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001254let neverHasSideEffects = 1 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001255def FsANDNPDrr : PDI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001256 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001257 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001258let mayLoad = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001259def FsANDNPDrm : PDI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001260 (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001261 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001262}
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001263}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001264
1265/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
1266///
1267/// In addition, we also have a special variant of the scalar form here to
1268/// represent the associated intrinsic operation. This form is unlike the
1269/// plain scalar form, in that it takes an entire vector (instead of a scalar)
1270/// and leaves the top elements undefined.
1271///
1272/// These three forms can each be reg+reg or reg+mem, so there are a total of
1273/// six "instructions".
1274///
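// Illustrative only (hedged): an instantiation such as the "defm ADD" further
// below is expected to expand, via TableGen's defm name concatenation, into
// six defs:
//   ADDSDrr, ADDSDrm          // scalar FR64 form, reg/reg and reg/mem
//   ADDPDrr, ADDPDrm          // packed v2f64 form
//   ADDSDrr_Int, ADDSDrm_Int  // intrinsic form operating on whole XMM regs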
Evan Cheng3ea4d672008-03-05 08:19:16 +00001275let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001276multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1277 SDNode OpNode, Intrinsic F64Int,
1278 bit Commutable = 0> {
1279 // Scalar operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001280 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001281 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001282 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1283 let isCommutable = Commutable;
1284 }
1285
1286 // Scalar operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001287 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001288 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001289 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1290
1291 // Vector operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001292 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001293 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001294 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1295 let isCommutable = Commutable;
1296 }
1297
1298 // Vector operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001299 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001300 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001301 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001302
1303 // Intrinsic operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001304 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001305 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001306 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1307 let isCommutable = Commutable;
1308 }
1309
1310 // Intrinsic operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001311 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001312 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001313 [(set VR128:$dst, (F64Int VR128:$src1,
1314 sse_load_f64:$src2))]>;
1315}
1316}
1317
1318// Arithmetic instructions
1319defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
1320defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
1321defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
1322defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
1323
1324/// sse2_fp_binop_rm - Other SSE2 binops
1325///
1326/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
1327/// instructions for a full-vector intrinsic form. Operations that map
1328/// onto C operators don't use this form since they just use the plain
1329/// vector form instead of having a separate vector intrinsic form.
1330///
1331/// This provides a total of eight "instructions".
1332///
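// Illustrative only (hedged): "defm MAX" and "defm MIN" below should each
// expand to eight defs, e.g. for MAX:
//   MAXSDrr, MAXSDrm          // scalar form
//   MAXPDrr, MAXPDrm          // packed form
//   MAXSDrr_Int, MAXSDrm_Int  // scalar intrinsic form (int_x86_sse2_max_sd)
//   MAXPDrr_Int, MAXPDrm_Int  // packed intrinsic form (int_x86_sse2_max_pd)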
Evan Cheng3ea4d672008-03-05 08:19:16 +00001333let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001334multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1335 SDNode OpNode,
1336 Intrinsic F64Int,
1337 Intrinsic V2F64Int,
1338 bit Commutable = 0> {
1339
1340 // Scalar operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001341 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001342 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001343 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1344 let isCommutable = Commutable;
1345 }
1346
1347 // Scalar operation, reg+mem.
Evan Cheng00b66ef2008-05-23 00:37:07 +00001348 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1349 (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001350 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001351 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1352
1353 // Vector operation, reg+reg.
Evan Cheng00b66ef2008-05-23 00:37:07 +00001354 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
1355 (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001356 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001357 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1358 let isCommutable = Commutable;
1359 }
1360
1361 // Vector operation, reg+mem.
Evan Cheng00b66ef2008-05-23 00:37:07 +00001362 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
1363 (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001364 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Evan Cheng00b66ef2008-05-23 00:37:07 +00001365 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001366
1367 // Intrinsic operation, reg+reg.
Evan Cheng00b66ef2008-05-23 00:37:07 +00001368 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst),
1369 (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001370 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001371 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1372 let isCommutable = Commutable;
1373 }
1374
1375 // Intrinsic operation, reg+mem.
Evan Cheng00b66ef2008-05-23 00:37:07 +00001376 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
1377 (ins VR128:$src1, sdmem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001378 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001379 [(set VR128:$dst, (F64Int VR128:$src1,
1380 sse_load_f64:$src2))]>;
1381
1382 // Vector intrinsic operation, reg+reg.
Evan Cheng00b66ef2008-05-23 00:37:07 +00001383 def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst),
1384 (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001385 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001386 [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
1387 let isCommutable = Commutable;
1388 }
1389
1390 // Vector intrinsic operation, reg+mem.
Evan Cheng00b66ef2008-05-23 00:37:07 +00001391 def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst),
1392 (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001393 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Evan Cheng00b66ef2008-05-23 00:37:07 +00001394 [(set VR128:$dst, (V2F64Int VR128:$src1,
1395 (memopv2f64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001396}
1397}
1398
1399defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
1400 int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
1401defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
1402 int_x86_sse2_min_sd, int_x86_sse2_min_pd>;
1403
1404//===----------------------------------------------------------------------===//
1405// SSE packed FP Instructions
1406
1407// Move Instructions
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001408let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001409def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001410 "movapd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001411let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001412def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001413 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001414 [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001415
Evan Chengb783fa32007-07-19 01:14:50 +00001416def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001417 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001418 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001419
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001420let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001421def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001422 "movupd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001423let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001424def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001425 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001426 [(set VR128:$dst, (loadv2f64 addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001427def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001428 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001429 [(store (v2f64 VR128:$src), addr:$dst)]>;
1430
1431// Intrinsic forms of MOVUPD load and store
Evan Chengb783fa32007-07-19 01:14:50 +00001432def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001433 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001434 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001435def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001436 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001437 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001438
Evan Cheng3ea4d672008-03-05 08:19:16 +00001439let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001440 let AddedComplexity = 20 in {
1441 def MOVLPDrm : PDI<0x12, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001442 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001443 "movlpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001444 [(set VR128:$dst,
1445 (v2f64 (vector_shuffle VR128:$src1,
1446 (scalar_to_vector (loadf64 addr:$src2)),
1447 MOVLP_shuffle_mask)))]>;
1448 def MOVHPDrm : PDI<0x16, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001449 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001450 "movhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001451 [(set VR128:$dst,
1452 (v2f64 (vector_shuffle VR128:$src1,
1453 (scalar_to_vector (loadf64 addr:$src2)),
1454 MOVHP_shuffle_mask)))]>;
1455 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +00001456} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001457
Evan Chengb783fa32007-07-19 01:14:50 +00001458def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001459 "movlpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001460 [(store (f64 (vector_extract (v2f64 VR128:$src),
1461 (iPTR 0))), addr:$dst)]>;
1462
1463// A v2f64 extract of element 1 is always custom lowered to unpack-high-to-low
1464// plus an extract of element 0, so the non-store version isn't too horrible.
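// Illustrative only (hedged): a store of the high element, conceptually
//   %e = extractelement <2 x double> %v, i32 1
//   store double %e, double* %p
// reaches the selector as the UNPCKH-style shuffle plus extract-of-element-0
// matched below, and so becomes a single "movhpd $src, $dst" store.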
Evan Chengb783fa32007-07-19 01:14:50 +00001465def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001466 "movhpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001467 [(store (f64 (vector_extract
1468 (v2f64 (vector_shuffle VR128:$src, (undef),
1469 UNPCKH_shuffle_mask)), (iPTR 0))),
1470 addr:$dst)]>;
1471
1472// SSE2 instructions without OpSize prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001473def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001474 "cvtdq2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001475 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1476 TB, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001477def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001478 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1479 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1480 (bitconvert (memopv2i64 addr:$src))))]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001481 TB, Requires<[HasSSE2]>;
1482
1483// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001484def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001485 "cvtdq2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001486 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1487 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001488def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001489 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1490 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1491 (bitconvert (memopv2i64 addr:$src))))]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001492 XS, Requires<[HasSSE2]>;
1493
Evan Chengb783fa32007-07-19 01:14:50 +00001494def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001495 "cvtps2dq\t{$src, $dst|$dst, $src}",
1496 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001497def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001498 "cvtps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001499 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
Evan Cheng00b66ef2008-05-23 00:37:07 +00001500 (memop addr:$src)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001501// SSE2 packed instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001502def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001503 "cvttps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001504 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
1505 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001506def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001507 "cvttps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001508 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
Evan Cheng00b66ef2008-05-23 00:37:07 +00001509 (memop addr:$src)))]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001510 XS, Requires<[HasSSE2]>;
1511
1512// SSE2 packed instructions with XD prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001513def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001514 "cvtpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001515 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1516 XD, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001517def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001518 "cvtpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001519 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
Evan Cheng00b66ef2008-05-23 00:37:07 +00001520 (memop addr:$src)))]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001521 XD, Requires<[HasSSE2]>;
1522
Evan Chengb783fa32007-07-19 01:14:50 +00001523def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001524 "cvttpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001525 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
Evan Cheng14c97c32008-03-14 07:46:48 +00001526def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001527 "cvttpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001528 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
Evan Cheng00b66ef2008-05-23 00:37:07 +00001529 (memop addr:$src)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001530
1531// SSE2 instructions without OpSize prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001532def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001533 "cvtps2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001534 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1535 TB, Requires<[HasSSE2]>;
Mon P Wangaa3f2662008-05-28 00:42:27 +00001536def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001537 "cvtps2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001538 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1539 (load addr:$src)))]>,
1540 TB, Requires<[HasSSE2]>;
1541
Evan Chengb783fa32007-07-19 01:14:50 +00001542def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001543 "cvtpd2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001544 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
Mon P Wangaa3f2662008-05-28 00:42:27 +00001545def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001546 "cvtpd2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001547 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
Evan Cheng00b66ef2008-05-23 00:37:07 +00001548 (memop addr:$src)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001549
1550// Match intrinsics which expect XMM operand(s).
1551// Aliases for intrinsics
Evan Cheng3ea4d672008-03-05 08:19:16 +00001552let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001553def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001554 (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001555 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001556 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1557 GR32:$src2))]>;
1558def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001559 (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001560 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001561 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1562 (loadi32 addr:$src2)))]>;
1563def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001564 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001565 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001566 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1567 VR128:$src2))]>;
1568def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001569 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001570 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001571 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1572 (load addr:$src2)))]>;
1573def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001574 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001575 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001576 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1577 VR128:$src2))]>, XS,
1578 Requires<[HasSSE2]>;
1579def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001580 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001581 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001582 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1583 (load addr:$src2)))]>, XS,
1584 Requires<[HasSSE2]>;
1585}
1586
1587// Arithmetic
1588
1589/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
1590///
1591/// In addition, we also have a special variant of the scalar form here to
1592/// represent the associated intrinsic operation. This form is unlike the
1593/// plain scalar form, in that it takes an entire vector (instead of a
1594/// scalar) and leaves the top elements undefined.
1595///
1596/// And, we have a special variant form for a full-vector intrinsic form.
1597///
1598/// These four forms can each have a reg or a mem operand, so there are a
1599/// total of eight "instructions".
1600///
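// Illustrative only (hedged): "defm SQRT" below is expected to expand to the
// eight defs
//   SQRTSDr, SQRTSDm          // scalar FR64 form
//   SQRTPDr, SQRTPDm          // packed v2f64 form
//   SQRTSDr_Int, SQRTSDm_Int  // scalar intrinsic form (int_x86_sse2_sqrt_sd)
//   SQRTPDr_Int, SQRTPDm_Int  // packed intrinsic form (int_x86_sse2_sqrt_pd)
// following the same defm name-concatenation convention as above.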
1601multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
1602 SDNode OpNode,
1603 Intrinsic F64Int,
1604 Intrinsic V2F64Int,
1605 bit Commutable = 0> {
1606 // Scalar operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001607 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001608 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001609 [(set FR64:$dst, (OpNode FR64:$src))]> {
1610 let isCommutable = Commutable;
1611 }
1612
1613 // Scalar operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001614 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001615 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001616 [(set FR64:$dst, (OpNode (load addr:$src)))]>;
1617
1618 // Vector operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001619 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001620 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001621 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
1622 let isCommutable = Commutable;
1623 }
1624
1625 // Vector operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001626 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001627 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001628 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001629
1630 // Intrinsic operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001631 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001632 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001633 [(set VR128:$dst, (F64Int VR128:$src))]> {
1634 let isCommutable = Commutable;
1635 }
1636
1637 // Intrinsic operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001638 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001639 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001640 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1641
1642 // Vector intrinsic operation, reg
Evan Chengb783fa32007-07-19 01:14:50 +00001643 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001644 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001645 [(set VR128:$dst, (V2F64Int VR128:$src))]> {
1646 let isCommutable = Commutable;
1647 }
1648
1649 // Vector intrinsic operation, mem
Dan Gohmanc747be52007-08-02 21:06:40 +00001650 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001651 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Evan Cheng00b66ef2008-05-23 00:37:07 +00001652 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001653}
1654
1655// Square root.
1656defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
1657 int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
1658
1659// There is no f64 version of the reciprocal approximation instructions.
1660
1661// Logical
Evan Cheng3ea4d672008-03-05 08:19:16 +00001662let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001663 let isCommutable = 1 in {
1664 def ANDPDrr : PDI<0x54, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001665 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001666 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001667 [(set VR128:$dst,
1668 (and (bc_v2i64 (v2f64 VR128:$src1)),
1669 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1670 def ORPDrr : PDI<0x56, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001671 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001672 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001673 [(set VR128:$dst,
1674 (or (bc_v2i64 (v2f64 VR128:$src1)),
1675 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1676 def XORPDrr : PDI<0x57, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001677 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001678 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001679 [(set VR128:$dst,
1680 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1681 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1682 }
1683
1684 def ANDPDrm : PDI<0x54, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001685 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001686 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001687 [(set VR128:$dst,
1688 (and (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001689 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001690 def ORPDrm : PDI<0x56, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001691 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001692 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001693 [(set VR128:$dst,
1694 (or (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001695 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001696 def XORPDrm : PDI<0x57, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001697 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001698 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001699 [(set VR128:$dst,
1700 (xor (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001701 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001702 def ANDNPDrr : PDI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001703 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001704 "andnpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001705 [(set VR128:$dst,
1706 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1707 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1708 def ANDNPDrm : PDI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001709 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001710 "andnpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001711 [(set VR128:$dst,
1712 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001713 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001714}
1715
Evan Cheng3ea4d672008-03-05 08:19:16 +00001716let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001717 def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
Evan Cheng14c97c32008-03-14 07:46:48 +00001718 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
1719 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1720 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
Nate Begeman061db5f2008-05-12 20:34:32 +00001721 VR128:$src, imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001722 def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
Evan Cheng14c97c32008-03-14 07:46:48 +00001723 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
1724 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1725 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
Evan Cheng00b66ef2008-05-23 00:37:07 +00001726 (memop addr:$src), imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001727}
Nate Begeman061db5f2008-05-12 20:34:32 +00001728def : Pat<(v2i64 (vsetcc (v2f64 VR128:$src1), VR128:$src2, cond:$cc)),
1729 (CMPPDrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
1730def : Pat<(v2i64 (vsetcc (v2f64 VR128:$src1), (memop addr:$src2), cond:$cc)),
1731 (CMPPDrmi VR128:$src1, addr:$src2, (SSE_CC_imm cond:$cc))>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001732
1733// Shuffle and unpack instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +00001734let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001735 def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
Evan Cheng14c97c32008-03-14 07:46:48 +00001736 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
1737 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1738 [(set VR128:$dst, (v2f64 (vector_shuffle
1739 VR128:$src1, VR128:$src2,
1740 SHUFP_shuffle_mask:$src3)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001741 def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001742 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001743 f128mem:$src2, i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00001744 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001745 [(set VR128:$dst,
1746 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001747 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001748 SHUFP_shuffle_mask:$src3)))]>;
1749
1750 let AddedComplexity = 10 in {
1751 def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001752 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001753 "unpckhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001754 [(set VR128:$dst,
1755 (v2f64 (vector_shuffle
1756 VR128:$src1, VR128:$src2,
1757 UNPCKH_shuffle_mask)))]>;
1758 def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001759 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001760 "unpckhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001761 [(set VR128:$dst,
1762 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001763 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001764 UNPCKH_shuffle_mask)))]>;
1765
1766 def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001767 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001768 "unpcklpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001769 [(set VR128:$dst,
1770 (v2f64 (vector_shuffle
1771 VR128:$src1, VR128:$src2,
1772 UNPCKL_shuffle_mask)))]>;
1773 def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001774 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001775 "unpcklpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001776 [(set VR128:$dst,
1777 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001778 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001779 UNPCKL_shuffle_mask)))]>;
1780 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +00001781} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001782
1783
1784//===----------------------------------------------------------------------===//
1785// SSE integer instructions
1786
1787// Move Instructions
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001788let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001789def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001790 "movdqa\t{$src, $dst|$dst, $src}", []>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001791let isSimpleLoad = 1, mayLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001792def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001793 "movdqa\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001794 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001795let mayStore = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001796def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001797 "movdqa\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001798 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001799let isSimpleLoad = 1, mayLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001800def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001801 "movdqu\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001802 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001803 XS, Requires<[HasSSE2]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001804let mayStore = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001805def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001806 "movdqu\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001807 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001808 XS, Requires<[HasSSE2]>;
1809
Dan Gohman4a4f1512007-07-18 20:23:34 +00001810// Intrinsic forms of MOVDQU load and store
Chris Lattner1a1932c2008-01-06 23:38:27 +00001811let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001812def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001813 "movdqu\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001814 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
1815 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001816def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001817 "movdqu\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001818 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
1819 XS, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001820
Evan Cheng88004752008-03-05 08:11:27 +00001821let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001822
1823multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
1824 bit Commutable = 0> {
Evan Chengb783fa32007-07-19 01:14:50 +00001825 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001826 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001827 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
1828 let isCommutable = Commutable;
1829 }
Evan Chengb783fa32007-07-19 01:14:50 +00001830 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001831 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001832 [(set VR128:$dst, (IntId VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00001833 (bitconvert (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001834}
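// As an illustrative sketch (not an actual definition), a defm such as
//   defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb", int_x86_sse2_padds_b, 1>;
// expands, with the $src1 = $dst constraint applied, to approximately:
//
// def PADDSBrr : PDI<0xEC, MRMSrcReg, (outs VR128:$dst),
//                    (ins VR128:$src1, VR128:$src2),
//                    "paddsb\t{$src2, $dst|$dst, $src2}",
//                    [(set VR128:$dst,
//                          (int_x86_sse2_padds_b VR128:$src1, VR128:$src2))]>;
//                    // isCommutable = 1
// def PADDSBrm : PDI<0xEC, MRMSrcMem, (outs VR128:$dst),
//                    (ins VR128:$src1, i128mem:$src2),
//                    "paddsb\t{$src2, $dst|$dst, $src2}",
//                    [(set VR128:$dst,
//                          (int_x86_sse2_padds_b VR128:$src1,
//                             (bitconvert (memopv2i64 addr:$src2))))]>;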
1835
Evan Chengf90f8f82008-05-03 00:52:09 +00001836multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
1837 string OpcodeStr,
1838 Intrinsic IntId, Intrinsic IntId2> {
1839 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1840 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1841 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
1842 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1843 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1844 [(set VR128:$dst, (IntId VR128:$src1,
1845 (bitconvert (memopv2i64 addr:$src2))))]>;
1846 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
1847 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1848 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
1849}
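// Similarly, each PDI_binop_rmi_int defm below (the vector shifts) gets a
// third, immediate form. A rough sketch of what the PSLLW instantiation
// produces (illustrative only):
//
// def PSLLWrr : PDI<0xF1, MRMSrcReg, ...>;  // count in the low qword of an XMM reg
// def PSLLWrm : PDI<0xF1, MRMSrcMem, ...>;  // count loaded from memory
// def PSLLWri : PDIi8<0x71, MRM6r, (outs VR128:$dst),
//                     (ins VR128:$src1, i32i8imm:$src2),
//                     "psllw\t{$src2, $dst|$dst, $src2}",
//                     [(set VR128:$dst,
//                           (int_x86_sse2_pslli_w VR128:$src1, (i32 imm:$src2)))]>;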
1850
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001851/// PDI_binop_rm - Simple SSE2 binary operator.
1852multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
1853 ValueType OpVT, bit Commutable = 0> {
Evan Chengb783fa32007-07-19 01:14:50 +00001854 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001855 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001856 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
1857 let isCommutable = Commutable;
1858 }
Evan Chengb783fa32007-07-19 01:14:50 +00001859 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001860 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001861 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00001862 (bitconvert (memopv2i64 addr:$src2)))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001863}
1864
1865/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
1866///
1867/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
1868/// to collapse (bitconvert VT to VT) into its operand.
1869///
1870multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
1871 bit Commutable = 0> {
Evan Chengb783fa32007-07-19 01:14:50 +00001872 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001873 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001874 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
1875 let isCommutable = Commutable;
1876 }
Evan Chengb783fa32007-07-19 01:14:50 +00001877 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001878 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001879 [(set VR128:$dst, (OpNode VR128:$src1,(memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001880}
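// If tblgen learned to collapse (bitconvert VT to VT) as the FIXME above
// suggests, these v2i64 helpers could presumably go away and the v2i64
// operations below could use the generic form directly, e.g. (illustrative
// only):
//
// defm PAND : PDI_binop_rm<0xDB, "pand", and, v2i64, 1>;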
1881
Evan Cheng3ea4d672008-03-05 08:19:16 +00001882} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001883
1884// 128-bit Integer Arithmetic
1885
1886defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
1887defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
1888defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
1889defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
1890
1891defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
1892defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
1893defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
1894defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
1895
1896defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
1897defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
1898defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
1899defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
1900
1901defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
1902defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
1903defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
1904defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
1905
1906defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
1907
1908defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
1909defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
1910defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
1911
1912defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
1913
1914defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
1915defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
1916
1917
1918defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
1919defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
1920defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
1921defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
1922defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
1923
1924
Evan Chengf90f8f82008-05-03 00:52:09 +00001925defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
1926 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
1927defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
1928 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
1929defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
1930 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001931
Evan Chengf90f8f82008-05-03 00:52:09 +00001932defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
1933 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
1934defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
1935 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
Nate Begemanc2ca5f62008-05-13 17:52:09 +00001936defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
Evan Chengf90f8f82008-05-03 00:52:09 +00001937 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001938
Evan Chengf90f8f82008-05-03 00:52:09 +00001939defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
1940 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
Nate Begemand66fc342008-05-13 01:47:52 +00001941defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
Evan Chengf90f8f82008-05-03 00:52:09 +00001942 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001943
1944// 128-bit logical shifts.
Evan Cheng3ea4d672008-03-05 08:19:16 +00001945let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001946 def PSLLDQri : PDIi8<0x73, MRM7r,
Evan Chengb783fa32007-07-19 01:14:50 +00001947 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001948 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001949 def PSRLDQri : PDIi8<0x73, MRM3r,
Evan Chengb783fa32007-07-19 01:14:50 +00001950 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001951 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001952 // PSRADQri doesn't exist in SSE[1-3].
1953}
1954
1955let Predicates = [HasSSE2] in {
1956 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
1957 (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1958 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
1959 (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1960 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
1961 (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
Evan Chengdea99362008-05-29 08:22:04 +00001962
1963  // Shift left / right and insert zeros.
1964 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
1965 (v2i64 (PSLLDQri VR128:$src, (PSxLDQ_imm imm:$amt)))>;
1966 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
1967 (v2i64 (PSRLDQri VR128:$src, (PSxLDQ_imm imm:$amt)))>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001968}
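// Note: pslldq / psrldq shift by *bytes*, while the dq intrinsics and the
// X86vshl / X86vshr nodes above carry the amount in bits, so PSxLDQ_imm
// (defined elsewhere in this file) is assumed to rescale the immediate from
// bits to bytes. For example, a 64-bit (8-byte) shift should select roughly:
//
// (int_x86_sse2_psll_dq VR128:$v, (i32 64))  ->  (PSLLDQri VR128:$v, 8)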
1969
1970// Logical
1971defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
1972defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
1973defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
1974
Evan Cheng3ea4d672008-03-05 08:19:16 +00001975let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001976 def PANDNrr : PDI<0xDF, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001977 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001978 "pandn\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001979 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
1980 VR128:$src2)))]>;
1981
1982 def PANDNrm : PDI<0xDF, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001983 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001984 "pandn\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001985 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
Dan Gohman7dc19012007-08-02 21:17:01 +00001986 (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001987}
1988
1989// SSE2 Integer comparison
1990defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
1991defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
1992defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
1993defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
1994defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
1995defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
1996
Nate Begeman78ca4f92008-05-12 23:09:43 +00001997def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), VR128:$src2, SETEQ)),
1998 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
1999def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), (memop addr:$src2), SETEQ)),
2000 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2001def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), VR128:$src2, SETEQ)),
2002 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2003def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), (memop addr:$src2), SETEQ)),
2004 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2005def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETEQ)),
2006 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2007def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), (memop addr:$src2), SETEQ)),
2008 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2009
2010def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), VR128:$src2, SETGT)),
2011 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2012def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), (memop addr:$src2), SETGT)),
2013 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2014def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), VR128:$src2, SETGT)),
2015 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2016def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), (memop addr:$src2), SETGT)),
2017 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2018def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETGT)),
2019 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2020def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), (memop addr:$src2), SETGT)),
2021 (PCMPGTDrm VR128:$src1, addr:$src2)>;
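// Only the EQ and GT conditions map directly onto pcmpeq* / pcmpgt*; other
// integer conditions are presumably canonicalized before instruction
// selection, e.g. a SETLT compare can be expressed with the SETGT pattern by
// swapping operands. A hand-written equivalent would look roughly like
// (illustrative only, not part of this file):
//
// def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETLT)),
//           (PCMPGTDrr VR128:$src2, VR128:$src1)>;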
2022
2023
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002024// Pack instructions
2025defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2026defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2027defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2028
2029// Shuffle and unpack instructions
2030def PSHUFDri : PDIi8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002031 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002032 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002033 [(set VR128:$dst, (v4i32 (vector_shuffle
2034 VR128:$src1, (undef),
2035 PSHUFD_shuffle_mask:$src2)))]>;
2036def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002037 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002038 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002039 [(set VR128:$dst, (v4i32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002040 (bc_v4i32(memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002041 (undef),
2042 PSHUFD_shuffle_mask:$src2)))]>;
2043
2044// SSE2 with ImmT == Imm8 and XS prefix.
2045def PSHUFHWri : Ii8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002046 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002047 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002048 [(set VR128:$dst, (v8i16 (vector_shuffle
2049 VR128:$src1, (undef),
2050 PSHUFHW_shuffle_mask:$src2)))]>,
2051 XS, Requires<[HasSSE2]>;
2052def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002053 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002054 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002055 [(set VR128:$dst, (v8i16 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002056 (bc_v8i16 (memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002057 (undef),
2058 PSHUFHW_shuffle_mask:$src2)))]>,
2059 XS, Requires<[HasSSE2]>;
2060
2061// SSE2 with ImmT == Imm8 and XD prefix.
2062def PSHUFLWri : Ii8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002063 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002064 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002065 [(set VR128:$dst, (v8i16 (vector_shuffle
2066 VR128:$src1, (undef),
2067 PSHUFLW_shuffle_mask:$src2)))]>,
2068 XD, Requires<[HasSSE2]>;
2069def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002070 (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002071 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002072 [(set VR128:$dst, (v8i16 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002073 (bc_v8i16 (memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002074 (undef),
2075 PSHUFLW_shuffle_mask:$src2)))]>,
2076 XD, Requires<[HasSSE2]>;
2077
2078
Evan Cheng3ea4d672008-03-05 08:19:16 +00002079let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002080 def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002081 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002082 "punpcklbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002083 [(set VR128:$dst,
2084 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2085 UNPCKL_shuffle_mask)))]>;
2086 def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002087 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002088 "punpcklbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002089 [(set VR128:$dst,
2090 (v16i8 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002091 (bc_v16i8 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002092 UNPCKL_shuffle_mask)))]>;
2093 def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002094 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002095 "punpcklwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002096 [(set VR128:$dst,
2097 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2098 UNPCKL_shuffle_mask)))]>;
2099 def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002100 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002101 "punpcklwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002102 [(set VR128:$dst,
2103 (v8i16 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002104 (bc_v8i16 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002105 UNPCKL_shuffle_mask)))]>;
2106 def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002107 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002108 "punpckldq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002109 [(set VR128:$dst,
2110 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2111 UNPCKL_shuffle_mask)))]>;
2112 def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002113 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002114 "punpckldq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002115 [(set VR128:$dst,
2116 (v4i32 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002117 (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002118 UNPCKL_shuffle_mask)))]>;
2119 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002120 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002121 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002122 [(set VR128:$dst,
2123 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2124 UNPCKL_shuffle_mask)))]>;
2125 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002126 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002127 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002128 [(set VR128:$dst,
2129 (v2i64 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002130 (memopv2i64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002131 UNPCKL_shuffle_mask)))]>;
2132
2133 def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002134 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002135 "punpckhbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002136 [(set VR128:$dst,
2137 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2138 UNPCKH_shuffle_mask)))]>;
2139 def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002140 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002141 "punpckhbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002142 [(set VR128:$dst,
2143 (v16i8 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002144 (bc_v16i8 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002145 UNPCKH_shuffle_mask)))]>;
2146 def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002147 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002148 "punpckhwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002149 [(set VR128:$dst,
2150 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2151 UNPCKH_shuffle_mask)))]>;
2152 def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002153 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002154 "punpckhwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002155 [(set VR128:$dst,
2156 (v8i16 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002157 (bc_v8i16 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002158 UNPCKH_shuffle_mask)))]>;
2159 def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002160 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002161 "punpckhdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002162 [(set VR128:$dst,
2163 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2164 UNPCKH_shuffle_mask)))]>;
2165 def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002166 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002167 "punpckhdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002168 [(set VR128:$dst,
2169 (v4i32 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002170 (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002171 UNPCKH_shuffle_mask)))]>;
2172 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002173 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002174 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002175 [(set VR128:$dst,
2176 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2177 UNPCKH_shuffle_mask)))]>;
2178 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002179 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002180 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002181 [(set VR128:$dst,
2182 (v2i64 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002183 (memopv2i64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002184 UNPCKH_shuffle_mask)))]>;
2185}
2186
2187// Extract / Insert
2188def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002189 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002190 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002191 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
Nate Begemand77e59e2008-02-11 04:19:36 +00002192 imm:$src2))]>;
Evan Cheng3ea4d672008-03-05 08:19:16 +00002193let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002194 def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002195 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002196 GR32:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00002197 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002198 [(set VR128:$dst,
Nate Begemand77e59e2008-02-11 04:19:36 +00002199 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002200 def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002201 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002202 i16mem:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00002203 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Nate Begemand77e59e2008-02-11 04:19:36 +00002204 [(set VR128:$dst,
2205 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2206 imm:$src3))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002207}
2208
2209// Mask creation
Evan Chengb783fa32007-07-19 01:14:50 +00002210def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002211 "pmovmskb\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002212 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2213
2214// Conditional store
Evan Cheng6e4d1d92007-09-11 19:55:27 +00002215let Uses = [EDI] in
Evan Chengb783fa32007-07-19 01:14:50 +00002216def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
Dan Gohman91888f02007-07-31 20:11:57 +00002217 "maskmovdqu\t{$mask, $src|$src, $mask}",
Evan Cheng6e4d1d92007-09-11 19:55:27 +00002218 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002219
2220// Non-temporal stores
Evan Chengb783fa32007-07-19 01:14:50 +00002221def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002222 "movntpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002223 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002224def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002225 "movntdq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002226 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002227def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002228 "movnti\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002229 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2230 TB, Requires<[HasSSE2]>;
2231
2232// Flush cache
Evan Chengb783fa32007-07-19 01:14:50 +00002233def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002234 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002235 TB, Requires<[HasSSE2]>;
2236
2237// Load, store, and memory fence
Evan Chengb783fa32007-07-19 01:14:50 +00002238def LFENCE : I<0xAE, MRM5m, (outs), (ins),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002239 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002240def MFENCE : I<0xAE, MRM6m, (outs), (ins),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002241 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
2242
Andrew Lenharth785610d2008-02-16 01:24:58 +00002243// TODO: custom-lower this so that the NOOP is never even generated.
2244def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
2245 (i8 0)), (NOOP)>;
2246def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
2247def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
2248def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
2249 (i8 1)), (MFENCE)>;
2250
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002251// Alias instruction that maps an all-ones vector to pcmpeqd.
Chris Lattner17dab4a2008-01-10 05:45:39 +00002252let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00002253 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +00002254 "pcmpeqd\t$dst, $dst",
Chris Lattnere6aa3862007-11-25 00:24:49 +00002255 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002256
2257// FR64 to 128-bit vector conversion.
Evan Chengb783fa32007-07-19 01:14:50 +00002258def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002259 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002260 [(set VR128:$dst,
2261 (v2f64 (scalar_to_vector FR64:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002262def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002263 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002264 [(set VR128:$dst,
2265 (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;
2266
Evan Chengb783fa32007-07-19 01:14:50 +00002267def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002268 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002269 [(set VR128:$dst,
2270 (v4i32 (scalar_to_vector GR32:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002271def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002272 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002273 [(set VR128:$dst,
2274 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2275
Evan Chengb783fa32007-07-19 01:14:50 +00002276def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002277 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002278 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2279
Evan Chengb783fa32007-07-19 01:14:50 +00002280def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002281 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002282 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2283
2284// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00002285def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002286 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002287 [(set VR128:$dst,
2288 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
2289 Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002290def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002291 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002292 [(store (i64 (vector_extract (v2i64 VR128:$src),
2293 (iPTR 0))), addr:$dst)]>;
2294
2295// FIXME: we may not be able to eliminate this movsd with coalescing because
2296// the src and dest register classes are different. We really want to write
2297// this pattern like this:
2298// def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
2299//           (f64 FR64:$src)>;
Evan Chengb783fa32007-07-19 01:14:50 +00002300def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002301 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002302 [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
2303 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002304def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002305 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002306 [(store (f64 (vector_extract (v2f64 VR128:$src),
2307 (iPTR 0))), addr:$dst)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002308def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002309 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002310 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2311 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002312def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002313 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002314 [(store (i32 (vector_extract (v4i32 VR128:$src),
2315 (iPTR 0))), addr:$dst)]>;
2316
Evan Chengb783fa32007-07-19 01:14:50 +00002317def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002318 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002319 [(set GR32:$dst, (bitconvert FR32:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002320def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002321 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002322 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
2323
2324
2325// Move to lower bits of a VR128, leaving upper bits alone.
2326// Three operand (but two address) aliases.
Evan Cheng3ea4d672008-03-05 08:19:16 +00002327let Constraints = "$src1 = $dst" in {
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00002328 let neverHasSideEffects = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002329 def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002330 (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002331 "movsd\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002332
2333 let AddedComplexity = 15 in
2334 def MOVLPDrr : SDI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002335 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002336 "movsd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002337 [(set VR128:$dst,
2338 (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
2339 MOVL_shuffle_mask)))]>;
2340}
2341
2342// Store / copy the lower 64 bits of an XMM register.
Evan Chengb783fa32007-07-19 01:14:50 +00002343def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002344 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002345 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
2346
2347// Move to the lower bits of a VR128, zeroing the upper bits.
2348// Loading from memory automatically zeroes the upper bits.
Evan Chengd743a5f2008-05-10 00:59:18 +00002349let AddedComplexity = 20 in {
2350def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2351 "movsd\t{$src, $dst|$dst, $src}",
2352 [(set VR128:$dst,
2353 (v2f64 (X86vzmovl (v2f64 (scalar_to_vector
2354 (loadf64 addr:$src))))))]>;
Evan Cheng40ee6e52008-05-08 00:57:18 +00002355
Evan Cheng056afe12008-05-20 18:24:47 +00002356def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
2357 (MOVZSD2PDrm addr:$src)>;
2358def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
Evan Chengd743a5f2008-05-10 00:59:18 +00002359 (MOVZSD2PDrm addr:$src)>;
Evan Chenge9b9c672008-05-09 21:53:03 +00002360def : Pat<(v2f64 (X86vzload addr:$src)), (MOVZSD2PDrm addr:$src)>;
Evan Chengd743a5f2008-05-10 00:59:18 +00002361}
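// The higher AddedComplexity above is presumably what makes these
// zero-extending load patterns win over the plain MOVSD2PDrm form whenever
// the DAG carries an explicit upper-zeroing node (X86vzmovl / X86vzload), so
// no separate zeroing of the upper lanes is emitted.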
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002362
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002363// movd / movq to an XMM register zero-extends the value.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002364let AddedComplexity = 15 in {
Evan Chengb783fa32007-07-19 01:14:50 +00002365def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002366 "movd\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00002367 [(set VR128:$dst, (v4i32 (X86vzmovl
Evan Cheng40ee6e52008-05-08 00:57:18 +00002368 (v4i32 (scalar_to_vector GR32:$src)))))]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002369// This is X86-64 only.
2370def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2371 "mov{d|q}\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00002372 [(set VR128:$dst, (v2i64 (X86vzmovl
Evan Cheng40ee6e52008-05-08 00:57:18 +00002373 (v2i64 (scalar_to_vector GR64:$src)))))]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002374}
2375
2376let AddedComplexity = 20 in {
Evan Chengb783fa32007-07-19 01:14:50 +00002377def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002378 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002379 [(set VR128:$dst,
Evan Chenge9b9c672008-05-09 21:53:03 +00002380 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
Evan Cheng40ee6e52008-05-08 00:57:18 +00002381 (loadi32 addr:$src))))))]>;
Evan Cheng3ad16c42008-05-22 18:56:56 +00002382
2383def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
2384 (MOVZDI2PDIrm addr:$src)>;
2385def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2386 (MOVZDI2PDIrm addr:$src)>;
2387
Evan Chengb783fa32007-07-19 01:14:50 +00002388def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002389 "movq\t{$src, $dst|$dst, $src}",
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002390 [(set VR128:$dst,
Evan Chenge9b9c672008-05-09 21:53:03 +00002391 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
Evan Cheng40ee6e52008-05-08 00:57:18 +00002392 (loadi64 addr:$src))))))]>, XS,
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002393 Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002394
Evan Cheng3ad16c42008-05-22 18:56:56 +00002395def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2396 (MOVZQI2PQIrm addr:$src)>;
2397def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
2398 (MOVZQI2PQIrm addr:$src)>;
Evan Chenge9b9c672008-05-09 21:53:03 +00002399def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
Evan Chengd743a5f2008-05-10 00:59:18 +00002400}
Evan Chenge9b9c672008-05-09 21:53:03 +00002401
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002402// Moving from XMM to XMM, clearing the upper 64 bits. Note: there is a bug in
2403// the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
2404let AddedComplexity = 15 in
2405def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2406 "movq\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00002407 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002408 XS, Requires<[HasSSE2]>;
2409
Evan Cheng056afe12008-05-20 18:24:47 +00002410let AddedComplexity = 20 in {
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002411def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2412 "movq\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00002413 [(set VR128:$dst, (v2i64 (X86vzmovl
Evan Cheng056afe12008-05-20 18:24:47 +00002414 (loadv2i64 addr:$src))))]>,
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002415 XS, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002416
Evan Cheng056afe12008-05-20 18:24:47 +00002417def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
2418 (MOVZPQILo2PQIrm addr:$src)>;
2419}
2420
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002421//===----------------------------------------------------------------------===//
2422// SSE3 Instructions
2423//===----------------------------------------------------------------------===//
2424
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002425// Move Instructions
Evan Chengb783fa32007-07-19 01:14:50 +00002426def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002427 "movshdup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002428 [(set VR128:$dst, (v4f32 (vector_shuffle
2429 VR128:$src, (undef),
2430 MOVSHDUP_shuffle_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002431def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002432 "movshdup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002433 [(set VR128:$dst, (v4f32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002434 (memopv4f32 addr:$src), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002435 MOVSHDUP_shuffle_mask)))]>;
2436
Evan Chengb783fa32007-07-19 01:14:50 +00002437def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002438 "movsldup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002439 [(set VR128:$dst, (v4f32 (vector_shuffle
2440 VR128:$src, (undef),
2441 MOVSLDUP_shuffle_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002442def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002443 "movsldup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002444 [(set VR128:$dst, (v4f32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002445 (memopv4f32 addr:$src), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002446 MOVSLDUP_shuffle_mask)))]>;
2447
Evan Chengb783fa32007-07-19 01:14:50 +00002448def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002449 "movddup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002450 [(set VR128:$dst, (v2f64 (vector_shuffle
2451 VR128:$src, (undef),
2452 SSE_splat_lo_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002453def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002454 "movddup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002455 [(set VR128:$dst,
2456 (v2f64 (vector_shuffle
2457 (scalar_to_vector (loadf64 addr:$src)),
2458 (undef),
2459 SSE_splat_lo_mask)))]>;
2460
2461// Arithmetic
Evan Cheng3ea4d672008-03-05 08:19:16 +00002462let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002463 def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002464 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002465 "addsubps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002466 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2467 VR128:$src2))]>;
2468 def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002469 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002470 "addsubps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002471 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
Evan Cheng00b66ef2008-05-23 00:37:07 +00002472 (memop addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002473 def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002474 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002475 "addsubpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002476 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2477 VR128:$src2))]>;
2478 def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002479 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002480 "addsubpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002481 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
Evan Cheng00b66ef2008-05-23 00:37:07 +00002482 (memop addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002483}
2484
Evan Chengb783fa32007-07-19 01:14:50 +00002485def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002486 "lddqu\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002487 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
2488
2489// Horizontal ops
2490class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002491 : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002492 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002493 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
2494class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002495 : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002496 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Evan Cheng00b66ef2008-05-23 00:37:07 +00002497 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002498class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002499 : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002500 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002501 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
2502class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002503 : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002504 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Evan Cheng00b66ef2008-05-23 00:37:07 +00002505 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002506
Evan Cheng3ea4d672008-03-05 08:19:16 +00002507let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002508 def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2509 def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2510 def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2511 def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2512 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2513 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2514 def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2515 def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2516}
2517
2518// Thread synchronization
Evan Chengb783fa32007-07-19 01:14:50 +00002519def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002520 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002521def MWAIT : I<0xC9, RawFrm, (outs), (ins), "mwait",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002522 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
2523
2524// vector_shuffle v1, <undef> <1, 1, 3, 3>
2525let AddedComplexity = 15 in
2526def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2527 MOVSHDUP_shuffle_mask)),
2528 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2529let AddedComplexity = 20 in
Dan Gohman4a4f1512007-07-18 20:23:34 +00002530def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002531 MOVSHDUP_shuffle_mask)),
2532 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
2533
2534// vector_shuffle v1, <undef> <0, 0, 2, 2>
2535let AddedComplexity = 15 in
2536 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2537 MOVSLDUP_shuffle_mask)),
2538 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2539let AddedComplexity = 20 in
Dan Gohman4a4f1512007-07-18 20:23:34 +00002540 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002541 MOVSLDUP_shuffle_mask)),
2542 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
2543
2544//===----------------------------------------------------------------------===//
2545// SSSE3 Instructions
2546//===----------------------------------------------------------------------===//
2547
Bill Wendling98680292007-08-10 06:22:27 +00002548/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002549multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
2550 Intrinsic IntId64, Intrinsic IntId128> {
2551 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
2552 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2553 [(set VR64:$dst, (IntId64 VR64:$src))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002554
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002555 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
2556 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2557 [(set VR64:$dst,
2558 (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;
2559
2560 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2561 (ins VR128:$src),
2562 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2563 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2564 OpSize;
2565
2566 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2567 (ins i128mem:$src),
2568 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2569 [(set VR128:$dst,
2570 (IntId128
2571 (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002572}
2573
Bill Wendling98680292007-08-10 06:22:27 +00002574/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002575multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
2576 Intrinsic IntId64, Intrinsic IntId128> {
2577 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2578 (ins VR64:$src),
2579 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2580 [(set VR64:$dst, (IntId64 VR64:$src))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002581
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002582 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2583 (ins i64mem:$src),
2584 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2585 [(set VR64:$dst,
2586 (IntId64
2587 (bitconvert (memopv4i16 addr:$src))))]>;
2588
2589 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2590 (ins VR128:$src),
2591 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2592 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2593 OpSize;
2594
2595 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2596 (ins i128mem:$src),
2597 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2598 [(set VR128:$dst,
2599 (IntId128
2600 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
Bill Wendling98680292007-08-10 06:22:27 +00002601}
2602
2603/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002604multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
2605 Intrinsic IntId64, Intrinsic IntId128> {
2606 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2607 (ins VR64:$src),
2608 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2609 [(set VR64:$dst, (IntId64 VR64:$src))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002610
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002611 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2612 (ins i64mem:$src),
2613 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2614 [(set VR64:$dst,
2615 (IntId64
2616 (bitconvert (memopv2i32 addr:$src))))]>;
2617
2618 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2619 (ins VR128:$src),
2620 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2621 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2622 OpSize;
2623
2624 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2625 (ins i128mem:$src),
2626 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2627 [(set VR128:$dst,
2628 (IntId128
2629 (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
Bill Wendling98680292007-08-10 06:22:27 +00002630}
2631
2632defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
2633 int_x86_ssse3_pabs_b,
2634 int_x86_ssse3_pabs_b_128>;
2635defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
2636 int_x86_ssse3_pabs_w,
2637 int_x86_ssse3_pabs_w_128>;
2638defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
2639 int_x86_ssse3_pabs_d,
2640 int_x86_ssse3_pabs_d_128>;
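// Each SS3I_unop_rm_int_* defm above therefore yields four instructions: a
// 64-bit MMX register/memory pair and a 128-bit XMM register/memory pair.
// For instance, PABSB expands approximately to (illustrative only):
//
// def PABSBrr64  : SS38I<0x1C, MRMSrcReg, (outs VR64:$dst),  (ins VR64:$src),   ...>;
// def PABSBrm64  : SS38I<0x1C, MRMSrcMem, (outs VR64:$dst),  (ins i64mem:$src), ...>;
// def PABSBrr128 : SS38I<0x1C, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),  ...>, OpSize;
// def PABSBrm128 : SS38I<0x1C, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),...>, OpSize;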
2641
2642/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
Evan Cheng3ea4d672008-03-05 08:19:16 +00002643let Constraints = "$src1 = $dst" in {
Bill Wendling98680292007-08-10 06:22:27 +00002644 multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
2645 Intrinsic IntId64, Intrinsic IntId128,
2646 bit Commutable = 0> {
2647 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2648 (ins VR64:$src1, VR64:$src2),
2649 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2650 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2651 let isCommutable = Commutable;
2652 }
2653 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2654 (ins VR64:$src1, i64mem:$src2),
2655 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2656 [(set VR64:$dst,
2657 (IntId64 VR64:$src1,
2658 (bitconvert (memopv8i8 addr:$src2))))]>;
2659
2660 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2661 (ins VR128:$src1, VR128:$src2),
2662 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2663 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2664 OpSize {
2665 let isCommutable = Commutable;
2666 }
2667 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2668 (ins VR128:$src1, i128mem:$src2),
2669 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2670 [(set VR128:$dst,
2671 (IntId128 VR128:$src1,
2672 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
2673 }
2674}
2675
2676/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
Evan Cheng3ea4d672008-03-05 08:19:16 +00002677let Constraints = "$src1 = $dst" in {
Bill Wendling98680292007-08-10 06:22:27 +00002678 multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
2679 Intrinsic IntId64, Intrinsic IntId128,
2680 bit Commutable = 0> {
2681 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2682 (ins VR64:$src1, VR64:$src2),
2683 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2684 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2685 let isCommutable = Commutable;
2686 }
2687 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2688 (ins VR64:$src1, i64mem:$src2),
2689 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2690 [(set VR64:$dst,
2691 (IntId64 VR64:$src1,
2692 (bitconvert (memopv4i16 addr:$src2))))]>;
2693
2694 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2695 (ins VR128:$src1, VR128:$src2),
2696 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2697 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2698 OpSize {
2699 let isCommutable = Commutable;
2700 }
2701 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2702 (ins VR128:$src1, i128mem:$src2),
2703 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2704 [(set VR128:$dst,
2705 (IntId128 VR128:$src1,
2706 (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
2707 }
2708}
2709
2710/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
Evan Cheng3ea4d672008-03-05 08:19:16 +00002711let Constraints = "$src1 = $dst" in {
Bill Wendling98680292007-08-10 06:22:27 +00002712 multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
2713 Intrinsic IntId64, Intrinsic IntId128,
2714 bit Commutable = 0> {
2715 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2716 (ins VR64:$src1, VR64:$src2),
2717 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2718 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2719 let isCommutable = Commutable;
2720 }
2721 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2722 (ins VR64:$src1, i64mem:$src2),
2723 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2724 [(set VR64:$dst,
2725 (IntId64 VR64:$src1,
2726 (bitconvert (memopv2i32 addr:$src2))))]>;
2727
2728 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2729 (ins VR128:$src1, VR128:$src2),
2730 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2731 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2732 OpSize {
2733 let isCommutable = Commutable;
2734 }
2735 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2736 (ins VR128:$src1, i128mem:$src2),
2737 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2738 [(set VR128:$dst,
2739 (IntId128 VR128:$src1,
2740 (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
2741 }
2742}
2743
2744defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw",
2745 int_x86_ssse3_phadd_w,
2746 int_x86_ssse3_phadd_w_128, 1>;
2747defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd",
2748 int_x86_ssse3_phadd_d,
2749 int_x86_ssse3_phadd_d_128, 1>;
2750defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw",
2751 int_x86_ssse3_phadd_sw,
2752 int_x86_ssse3_phadd_sw_128, 1>;
2753defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw",
2754 int_x86_ssse3_phsub_w,
2755 int_x86_ssse3_phsub_w_128>;
2756defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd",
2757 int_x86_ssse3_phsub_d,
2758 int_x86_ssse3_phsub_d_128>;
2759defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw",
2760 int_x86_ssse3_phsub_sw,
2761 int_x86_ssse3_phsub_sw_128>;
2762defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
2763 int_x86_ssse3_pmadd_ub_sw,
2764 int_x86_ssse3_pmadd_ub_sw_128, 1>;
2765defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
2766 int_x86_ssse3_pmul_hr_sw,
2767 int_x86_ssse3_pmul_hr_sw_128, 1>;
2768defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb",
2769 int_x86_ssse3_pshuf_b,
2770 int_x86_ssse3_pshuf_b_128>;
2771defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb",
2772 int_x86_ssse3_psign_b,
2773 int_x86_ssse3_psign_b_128>;
2774defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw",
2775 int_x86_ssse3_psign_w,
2776 int_x86_ssse3_psign_w_128>;
2777defm PSIGND : SS3I_binop_rm_int_32<0x0A, "psignd",
2778 int_x86_ssse3_psign_d,
2779 int_x86_ssse3_psign_d_128>;
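// Each defm above instantiates both the 64-bit MMX form (rr64/rm64) and the
// 128-bit XMM form (rr128/rm128) of the named SSSE3 operation; e.g. PHADDW
// expands to PHADDWrr64, PHADDWrm64, PHADDWrr128 and PHADDWrm128, all sharing
// the same opcode byte.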
2780
Evan Cheng3ea4d672008-03-05 08:19:16 +00002781let Constraints = "$src1 = $dst" in {
Bill Wendling1dc817c2007-08-10 09:00:17 +00002782 def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
2783 (ins VR64:$src1, VR64:$src2, i16imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002784 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002785 [(set VR64:$dst,
2786 (int_x86_ssse3_palign_r
2787 VR64:$src1, VR64:$src2,
2788 imm:$src3))]>;
Dan Gohmanbcb9d462008-05-28 01:50:19 +00002789 def PALIGNR64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
Bill Wendling1dc817c2007-08-10 09:00:17 +00002790 (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002791 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002792 [(set VR64:$dst,
2793 (int_x86_ssse3_palign_r
2794 VR64:$src1,
2795 (bitconvert (memopv2i32 addr:$src2)),
2796 imm:$src3))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002797
Bill Wendling1dc817c2007-08-10 09:00:17 +00002798 def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
2799 (ins VR128:$src1, VR128:$src2, i32imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002800 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002801 [(set VR128:$dst,
2802 (int_x86_ssse3_palign_r_128
2803 VR128:$src1, VR128:$src2,
2804 imm:$src3))]>, OpSize;
Dan Gohmanbcb9d462008-05-28 01:50:19 +00002805 def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
Bill Wendling1dc817c2007-08-10 09:00:17 +00002806 (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002807 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002808 [(set VR128:$dst,
2809 (int_x86_ssse3_palign_r_128
2810 VR128:$src1,
2811 (bitconvert (memopv4i32 addr:$src2)),
2812 imm:$src3))]>, OpSize;
Bill Wendling98680292007-08-10 06:22:27 +00002813}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002814
2815//===----------------------------------------------------------------------===//
2816// Non-Instruction Patterns
2817//===----------------------------------------------------------------------===//
2818
Chris Lattnerdec9cb52008-01-24 08:07:48 +00002819// extload f32 -> f64. This matches load+fextend because we have a hack in
2820// the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
2821// Since these loads aren't folded into the fextend, we have to match it
2822// explicitly here.
2823let Predicates = [HasSSE2] in
2824 def : Pat<(fextend (loadf32 addr:$src)),
2825 (CVTSS2SDrm addr:$src)>;
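// For example, IR along the lines of "fpext (load float* %p) to double"
// reaches the selector as fextend(loadf32) and is matched here as a single
// cvtss2sd with a memory operand.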
2826
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002827// bit_convert
2828let Predicates = [HasSSE2] in {
2829 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
2830 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
2831 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
2832 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
2833 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
2834 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
2835 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
2836 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
2837 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
2838 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
2839 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
2840 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
2841 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
2842 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
2843 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
2844 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
2845 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
2846 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
2847 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
2848 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
2849 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
2850 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
2851 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
2852 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
2853 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
2854 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
2855 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
2856 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
2857 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
2858 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
2859}
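// The bitconvert patterns above are register-interpretation changes only: no
// instruction is emitted, the same XMM register is simply reused with the new
// vector type.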
2860
2861// Move a scalar to XMM with the upper elements zero-extended
2862// (a movd to an XMM register zero-extends).
2863let AddedComplexity = 15 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002864// Zero a VR128, then do a MOVS{S|D} into the lower bits.
Evan Chenge9b9c672008-05-09 21:53:03 +00002865def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002866 (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
Evan Chenge9b9c672008-05-09 21:53:03 +00002867def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002868 (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE2]>;
Evan Chenge259e872008-05-09 23:37:55 +00002869def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
2870 (MOVLPSrr (V_SET0), VR128:$src)>, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002871}
2872
2873// Splat v2f64 / v2i64
2874let AddedComplexity = 10 in {
2875def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
2876 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2877def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
2878 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2879def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
2880 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2881def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
2882 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2883}
2884
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002885// Special unary SHUFPSrri case.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002886def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
2887 SHUFP_unary_shuffle_mask:$sm)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002888 (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2889 Requires<[HasSSE1]>;
Dan Gohman7dc19012007-08-02 21:17:01 +00002890// Special unary SHUFPDrri case.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002891def : Pat<(v2f64 (vector_shuffle VR128:$src1, (undef),
2892 SHUFP_unary_shuffle_mask:$sm)),
Dan Gohman7dc19012007-08-02 21:17:01 +00002893 (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2894 Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002895// Unary v4f32 shuffle with PSHUF* in order to fold a load.
Evan Chengbf8b2c52008-04-05 00:30:36 +00002896def : Pat<(vector_shuffle (bc_v4i32 (memopv4f32 addr:$src1)), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002897 SHUFP_unary_shuffle_mask:$sm),
2898 (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2899 Requires<[HasSSE2]>;
2900// Special binary v4i32 shuffle cases with SHUFPS.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002901def : Pat<(v4i32 (vector_shuffle VR128:$src1, (v4i32 VR128:$src2),
2902 PSHUFD_binary_shuffle_mask:$sm)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002903 (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
2904 Requires<[HasSSE2]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002905def : Pat<(v4i32 (vector_shuffle VR128:$src1,
2906 (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002907 (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
2908 Requires<[HasSSE2]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002909// Special binary v2i64 shuffle cases using SHUFPDrri.
2910def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2911 SHUFP_shuffle_mask:$sm)),
2912 (SHUFPDrri VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$sm)>,
2913 Requires<[HasSSE2]>;
2914// Special unary SHUFPDrri case.
2915def : Pat<(v2i64 (vector_shuffle VR128:$src1, (undef),
2916 SHUFP_unary_shuffle_mask:$sm)),
2917 (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2918 Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002919
2920// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
2921let AddedComplexity = 10 in {
2922def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
2923 UNPCKL_v_undef_shuffle_mask)),
2924 (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
2925def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
2926 UNPCKL_v_undef_shuffle_mask)),
2927 (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2928def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
2929 UNPCKL_v_undef_shuffle_mask)),
2930 (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2931def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2932 UNPCKL_v_undef_shuffle_mask)),
2933 (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2934}
2935
2936// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
2937let AddedComplexity = 10 in {
2938def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
2939 UNPCKH_v_undef_shuffle_mask)),
2940 (UNPCKHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
2941def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
2942 UNPCKH_v_undef_shuffle_mask)),
2943 (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2944def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
2945 UNPCKH_v_undef_shuffle_mask)),
2946 (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2947def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2948 UNPCKH_v_undef_shuffle_mask)),
2949 (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2950}
2951
2952let AddedComplexity = 15 in {
2953// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
2954def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2955 MOVHP_shuffle_mask)),
2956 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
2957
2958// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
2959def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2960 MOVHLPS_shuffle_mask)),
2961 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
2962
2963// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
2964def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
2965 MOVHLPS_v_undef_shuffle_mask)),
2966 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
2967def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
2968 MOVHLPS_v_undef_shuffle_mask)),
2969 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
2970}
2971
2972let AddedComplexity = 20 in {
2973// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
2974// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
Evan Cheng00b66ef2008-05-23 00:37:07 +00002975def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memop addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002976 MOVLP_shuffle_mask)),
2977 (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
Evan Cheng00b66ef2008-05-23 00:37:07 +00002978def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memop addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002979 MOVLP_shuffle_mask)),
2980 (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
Evan Cheng00b66ef2008-05-23 00:37:07 +00002981def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memop addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002982 MOVHP_shuffle_mask)),
2983 (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
Evan Cheng00b66ef2008-05-23 00:37:07 +00002984def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memop addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002985 MOVHP_shuffle_mask)),
2986 (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2987
Evan Cheng2b2a7012008-05-23 21:23:16 +00002988def : Pat<(v4i32 (vector_shuffle VR128:$src1,
2989 (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002990 MOVLP_shuffle_mask)),
2991 (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
Evan Cheng00b66ef2008-05-23 00:37:07 +00002992def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002993 MOVLP_shuffle_mask)),
2994 (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
Evan Cheng2b2a7012008-05-23 21:23:16 +00002995def : Pat<(v4i32 (vector_shuffle VR128:$src1,
2996 (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002997 MOVHP_shuffle_mask)),
2998 (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
Evan Cheng00b66ef2008-05-23 00:37:07 +00002999def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memop addr:$src2),
Evan Cheng1ff2ea52008-05-23 18:00:18 +00003000 MOVHP_shuffle_mask)),
3001 (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003002}
3003
Evan Cheng2b2a7012008-05-23 21:23:16 +00003004// (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3005// (store (vector_shuffle (load addr), v2, <0, 1, 4, 5>), addr) using MOVHPS
3006def : Pat<(store (v4f32 (vector_shuffle (memop addr:$src1), VR128:$src2,
3007 MOVLP_shuffle_mask)), addr:$src1),
3008 (MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
3009def : Pat<(store (v2f64 (vector_shuffle (memop addr:$src1), VR128:$src2,
3010 MOVLP_shuffle_mask)), addr:$src1),
3011 (MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3012def : Pat<(store (v4f32 (vector_shuffle (memop addr:$src1), VR128:$src2,
3013 MOVHP_shuffle_mask)), addr:$src1),
3014 (MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
3015def : Pat<(store (v2f64 (vector_shuffle (memop addr:$src1), VR128:$src2,
3016 MOVHP_shuffle_mask)), addr:$src1),
3017 (MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3018
3019def : Pat<(store (v4i32 (vector_shuffle
3020 (bc_v4i32 (memopv2i64 addr:$src1)), VR128:$src2,
3021 MOVLP_shuffle_mask)), addr:$src1),
3022 (MOVLPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
3023def : Pat<(store (v2i64 (vector_shuffle (memop addr:$src1), VR128:$src2,
3024 MOVLP_shuffle_mask)), addr:$src1),
3025 (MOVLPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3026def : Pat<(store (v4i32 (vector_shuffle
3027 (bc_v4i32 (memopv2i64 addr:$src1)), VR128:$src2,
3028 MOVHP_shuffle_mask)), addr:$src1),
3029 (MOVHPSmr addr:$src1, VR128:$src2)>, Requires<[HasSSE1]>;
3030def : Pat<(store (v2i64 (vector_shuffle (memop addr:$src1), VR128:$src2,
3031 MOVHP_shuffle_mask)), addr:$src1),
3032 (MOVHPDmr addr:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3033
3034
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003035let AddedComplexity = 15 in {
3036// Setting the lowest element in the vector.
3037def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
3038 MOVL_shuffle_mask)),
3039 (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3040def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
3041 MOVL_shuffle_mask)),
3042 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3043
3044// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
3045def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
3046 MOVLP_shuffle_mask)),
3047 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3048def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
3049 MOVLP_shuffle_mask)),
3050 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3051}
3052
3053// Set lowest element and zero upper elements.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00003054let AddedComplexity = 15 in
3055def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc, VR128:$src,
3056 MOVL_shuffle_mask)),
3057 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
Evan Chenge9b9c672008-05-09 21:53:03 +00003058def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
Evan Chengd09a8a02008-05-08 22:35:02 +00003059 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003060
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003061// Some special case pandn patterns.
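// (and (xor X, all-ones), Y) is (and (not X), Y), which is exactly what PANDN
// computes; the three variants below differ only in the element type used to
// build the all-ones vector.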
3062def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3063 VR128:$src2)),
3064 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3065def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3066 VR128:$src2)),
3067 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3068def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3069 VR128:$src2)),
3070 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3071
3072def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
Evan Cheng00b66ef2008-05-23 00:37:07 +00003073 (memop addr:$src2))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003074 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3075def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
Evan Cheng00b66ef2008-05-23 00:37:07 +00003076 (memop addr:$src2))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003077 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3078def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
Evan Cheng00b66ef2008-05-23 00:37:07 +00003079 (memop addr:$src2))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003080 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3081
Nate Begeman78246ca2007-11-17 03:58:34 +00003082// vector -> vector casts
3083def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3084 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3085def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3086 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3087
Evan Cheng51a49b22007-07-20 00:27:43 +00003088// Use movaps / movups for SSE integer load / store (one byte shorter).
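// MOVAPS/MOVUPS encode as 0F 28 / 0F 10, while MOVDQA/MOVDQU need an extra
// prefix byte (66 / F3), so the *PS forms are one byte shorter; the register
// contents are unaffected by the reinterpretation.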
Dan Gohman11821702007-07-27 17:16:43 +00003089def : Pat<(alignedloadv4i32 addr:$src),
3090 (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
3091def : Pat<(loadv4i32 addr:$src),
3092 (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
Evan Cheng51a49b22007-07-20 00:27:43 +00003093def : Pat<(alignedloadv2i64 addr:$src),
3094 (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
3095def : Pat<(loadv2i64 addr:$src),
3096 (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;
3097
3098def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3099 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3100def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3101 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3102def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3103 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3104def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3105 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3106def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3107 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3108def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3109 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3110def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3111 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3112def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3113 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
Nate Begemanb2975562008-02-03 07:18:54 +00003114
3115//===----------------------------------------------------------------------===//
3116// SSE4.1 Instructions
3117//===----------------------------------------------------------------------===//
3118
Nate Begemanb2975562008-02-03 07:18:54 +00003119multiclass sse41_fp_unop_rm<bits<8> opcss, bits<8> opcps,
3120 bits<8> opcsd, bits<8> opcpd,
3121 string OpcodeStr,
3122 Intrinsic F32Int,
3123 Intrinsic V4F32Int,
3124 Intrinsic F64Int,
Nate Begemaneb3f5432008-02-04 05:34:34 +00003125 Intrinsic V2F64Int> {
Nate Begemanb2975562008-02-03 07:18:54 +00003126 // Intrinsic operation, reg.
Evan Cheng78d00612008-03-14 07:39:27 +00003127 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
Nate Begeman72d802a2008-02-04 06:00:24 +00003128 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003129 !strconcat(OpcodeStr,
3130 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003131 [(set VR128:$dst, (F32Int VR128:$src1, imm:$src2))]>,
3132 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003133
3134 // Intrinsic operation, mem.
Evan Cheng78d00612008-03-14 07:39:27 +00003135 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
Nate Begeman72d802a2008-02-04 06:00:24 +00003136 (outs VR128:$dst), (ins ssmem:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003137 !strconcat(OpcodeStr,
3138 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003139 [(set VR128:$dst, (F32Int sse_load_f32:$src1, imm:$src2))]>,
3140 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003141
3142 // Vector intrinsic operation, reg
Evan Cheng78d00612008-03-14 07:39:27 +00003143 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
Nate Begeman72d802a2008-02-04 06:00:24 +00003144 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003145 !strconcat(OpcodeStr,
3146 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003147 [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
3148 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003149
3150 // Vector intrinsic operation, mem
Evan Cheng78d00612008-03-14 07:39:27 +00003151 def PSm_Int : SS4AIi8<opcps, MRMSrcMem,
Nate Begeman72d802a2008-02-04 06:00:24 +00003152 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003153 !strconcat(OpcodeStr,
3154 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Evan Cheng00b66ef2008-05-23 00:37:07 +00003155 [(set VR128:$dst,
3156 (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>,
Nate Begemaneb3f5432008-02-04 05:34:34 +00003157 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003158
3159 // Intrinsic operation, reg.
Evan Cheng78d00612008-03-14 07:39:27 +00003160 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
Nate Begeman72d802a2008-02-04 06:00:24 +00003161 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003162 !strconcat(OpcodeStr,
3163 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003164 [(set VR128:$dst, (F64Int VR128:$src1, imm:$src2))]>,
3165 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003166
3167 // Intrinsic operation, mem.
Evan Cheng78d00612008-03-14 07:39:27 +00003168 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
Nate Begeman72d802a2008-02-04 06:00:24 +00003169 (outs VR128:$dst), (ins sdmem:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003170 !strconcat(OpcodeStr,
3171 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003172 [(set VR128:$dst, (F64Int sse_load_f64:$src1, imm:$src2))]>,
3173 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003174
3175 // Vector intrinsic operation, reg
Evan Cheng78d00612008-03-14 07:39:27 +00003176 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
Nate Begeman72d802a2008-02-04 06:00:24 +00003177 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003178 !strconcat(OpcodeStr,
3179 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003180 [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
3181 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003182
3183 // Vector intrinsic operation, mem
Evan Cheng78d00612008-03-14 07:39:27 +00003184 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
Nate Begeman72d802a2008-02-04 06:00:24 +00003185 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003186 !strconcat(OpcodeStr,
3187 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Evan Cheng00b66ef2008-05-23 00:37:07 +00003188 [(set VR128:$dst,
3189 (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>,
Nate Begemaneb3f5432008-02-04 05:34:34 +00003190 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003191}
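// The multiclass above yields eight instructions per instantiation: scalar and
// packed, single and double precision, each with register and memory forms,
// all taking an 8-bit immediate.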
3192
3193// FP round - roundss, roundps, roundsd, roundpd
3194defm ROUND : sse41_fp_unop_rm<0x0A, 0x08, 0x0B, 0x09, "round",
3195 int_x86_sse41_round_ss, int_x86_sse41_round_ps,
3196 int_x86_sse41_round_sd, int_x86_sse41_round_pd>;
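// The round immediate follows the SSE4.1 encoding: bits 1:0 select the
// rounding mode (nearest, down, up, truncate), bit 2 selects MXCSR.RC instead
// of the immediate mode, and bit 3 suppresses the precision exception.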
Nate Begemaneb3f5432008-02-04 05:34:34 +00003197
3198// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
3199multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
3200 Intrinsic IntId128> {
3201 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3202 (ins VR128:$src),
3203 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3204 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
3205 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3206 (ins i128mem:$src),
3207 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3208 [(set VR128:$dst,
3209 (IntId128
3210 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
3211}
3212
3213defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
3214 int_x86_sse41_phminposuw>;
3215
3216/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
Evan Cheng3ea4d672008-03-05 08:19:16 +00003217let Constraints = "$src1 = $dst" in {
Nate Begemaneb3f5432008-02-04 05:34:34 +00003218 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
3219 Intrinsic IntId128, bit Commutable = 0> {
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003220 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3221 (ins VR128:$src1, VR128:$src2),
3222 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3223 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3224 OpSize {
Nate Begemaneb3f5432008-02-04 05:34:34 +00003225 let isCommutable = Commutable;
3226 }
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003227 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3228 (ins VR128:$src1, i128mem:$src2),
3229 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3230 [(set VR128:$dst,
3231 (IntId128 VR128:$src1,
3232 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
Nate Begemaneb3f5432008-02-04 05:34:34 +00003233 }
3234}
3235
3236defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq",
3237 int_x86_sse41_pcmpeqq, 1>;
3238defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw",
3239 int_x86_sse41_packusdw, 0>;
3240defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb",
3241 int_x86_sse41_pminsb, 1>;
3242defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd",
3243 int_x86_sse41_pminsd, 1>;
3244defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud",
3245 int_x86_sse41_pminud, 1>;
3246defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw",
3247 int_x86_sse41_pminuw, 1>;
3248defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb",
3249 int_x86_sse41_pmaxsb, 1>;
3250defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd",
3251 int_x86_sse41_pmaxsd, 1>;
3252defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud",
3253 int_x86_sse41_pmaxud, 1>;
3254defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw",
3255 int_x86_sse41_pmaxuw, 1>;
Nate Begeman72d802a2008-02-04 06:00:24 +00003256
Nate Begeman58057962008-02-09 01:38:08 +00003257
3258/// SS41I_binop_patint - SSE 4.1 binary operator with both a generic SDNode
3258/// pattern and an intrinsic form
Evan Cheng3ea4d672008-03-05 08:19:16 +00003259let Constraints = "$src1 = $dst" in {
Dan Gohmane3731f52008-05-23 17:49:40 +00003260 multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, ValueType OpVT,
3261 SDNode OpNode, Intrinsic IntId128,
3262 bit Commutable = 0> {
Nate Begeman58057962008-02-09 01:38:08 +00003263 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3264 (ins VR128:$src1, VR128:$src2),
3265 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmane3731f52008-05-23 17:49:40 +00003266 [(set VR128:$dst, (OpNode (OpVT VR128:$src1),
3267 VR128:$src2))]>, OpSize {
Nate Begeman58057962008-02-09 01:38:08 +00003268 let isCommutable = Commutable;
3269 }
3270 def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3271 (ins VR128:$src1, VR128:$src2),
3272 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3273 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3274 OpSize {
3275 let isCommutable = Commutable;
3276 }
3277 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3278 (ins VR128:$src1, i128mem:$src2),
3279 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3280 [(set VR128:$dst,
Evan Cheng00b66ef2008-05-23 00:37:07 +00003281 (OpNode VR128:$src1, (memop addr:$src2)))]>, OpSize;
Nate Begeman58057962008-02-09 01:38:08 +00003282 def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3283 (ins VR128:$src1, i128mem:$src2),
3284 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3285 [(set VR128:$dst,
Evan Cheng00b66ef2008-05-23 00:37:07 +00003286 (IntId128 VR128:$src1, (memop addr:$src2)))]>,
Nate Begeman58057962008-02-09 01:38:08 +00003287 OpSize;
3288 }
3289}
Dan Gohmane3731f52008-05-23 17:49:40 +00003290defm PMULLD : SS41I_binop_patint<0x40, "pmulld", v4i32, mul,
Nate Begeman58057962008-02-09 01:38:08 +00003291 int_x86_sse41_pmulld, 1>;
Dan Gohmane3731f52008-05-23 17:49:40 +00003292defm PMULDQ : SS41I_binop_patint<0x28, "pmuldq", v2i64, mul,
3293 int_x86_sse41_pmuldq, 1>;
Nate Begeman58057962008-02-09 01:38:08 +00003294
3295
Evan Cheng78d00612008-03-14 07:39:27 +00003296/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
Evan Cheng3ea4d672008-03-05 08:19:16 +00003297let Constraints = "$src1 = $dst" in {
Nate Begeman72d802a2008-02-04 06:00:24 +00003298 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
3299 Intrinsic IntId128, bit Commutable = 0> {
Evan Cheng78d00612008-03-14 07:39:27 +00003300 def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003301 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
3302 !strconcat(OpcodeStr,
Nate Begemanb4e9a042008-02-10 18:47:57 +00003303 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003304 [(set VR128:$dst,
3305 (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
3306 OpSize {
Nate Begeman72d802a2008-02-04 06:00:24 +00003307 let isCommutable = Commutable;
3308 }
Evan Cheng78d00612008-03-14 07:39:27 +00003309 def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003310 (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
3311 !strconcat(OpcodeStr,
Nate Begemanb4e9a042008-02-10 18:47:57 +00003312 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003313 [(set VR128:$dst,
3314 (IntId128 VR128:$src1,
3315 (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
3316 OpSize;
Nate Begeman72d802a2008-02-04 06:00:24 +00003317 }
3318}
3319
3320defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps",
3321 int_x86_sse41_blendps, 0>;
3322defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd",
3323 int_x86_sse41_blendpd, 0>;
3324defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw",
3325 int_x86_sse41_pblendw, 0>;
3326defm DPPS : SS41I_binop_rmi_int<0x40, "dpps",
3327 int_x86_sse41_dpps, 1>;
3328defm DPPD : SS41I_binop_rmi_int<0x41, "dppd",
3329 int_x86_sse41_dppd, 1>;
3330defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw",
3331 int_x86_sse41_mpsadbw, 0>;
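// In each case above the 8-bit immediate is control data rather than an
// operand: a per-element select mask for the blends, input/result masks for
// dpps/dppd, and block offsets for mpsadbw.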
Nate Begeman58057962008-02-09 01:38:08 +00003332
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003333
Evan Cheng78d00612008-03-14 07:39:27 +00003334/// SS41I_ternary_int - SSE 4.1 ternary operator
Evan Cheng3ea4d672008-03-05 08:19:16 +00003335let Uses = [XMM0], Constraints = "$src1 = $dst" in {
Nate Begemanb4e9a042008-02-10 18:47:57 +00003336 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3337 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3338 (ins VR128:$src1, VR128:$src2),
3339 !strconcat(OpcodeStr,
3340 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
3341 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
3342 OpSize;
3343
3344 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3345 (ins VR128:$src1, i128mem:$src2),
3346 !strconcat(OpcodeStr,
3347 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
3348 [(set VR128:$dst,
3349 (IntId VR128:$src1,
3350 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
3351 }
3352}
3353
3354defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
3355defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
3356defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
3357
3358
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003359multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3360 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3361 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3362 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3363
3364 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3365 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3366 [(set VR128:$dst,
3367 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3368}
3369
3370defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3371defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3372defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3373defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3374defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3375defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
3376
3377multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3378 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3379 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3380 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3381
3382 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3383 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3384 [(set VR128:$dst,
3385 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3386}
3387
3388defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
3389defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
3390defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
3391defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
3392
3393multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3394 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3395 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3396 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3397
3398 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
3399 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3400 [(set VR128:$dst,
3401 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3402}
3403
3404defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
3405defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
3406
3407
Nate Begemand77e59e2008-02-11 04:19:36 +00003408/// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
3409multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
Evan Chengc2054be2008-03-26 08:11:49 +00003410 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003411 (ins VR128:$src1, i32i8imm:$src2),
3412 !strconcat(OpcodeStr,
3413 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemand77e59e2008-02-11 04:19:36 +00003414 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
3415 OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003416 def mr : SS4AIi8<opc, MRMDestMem, (outs),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003417 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
3418 !strconcat(OpcodeStr,
3419 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemand77e59e2008-02-11 04:19:36 +00003420 []>, OpSize;
3421// FIXME:
3422// There's an AssertZext in the way of writing the store pattern
3423// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003424}
3425
Nate Begemand77e59e2008-02-11 04:19:36 +00003426defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003427
Nate Begemand77e59e2008-02-11 04:19:36 +00003428
3429/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
3430multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
Evan Cheng78d00612008-03-14 07:39:27 +00003431 def mr : SS4AIi8<opc, MRMDestMem, (outs),
Nate Begemand77e59e2008-02-11 04:19:36 +00003432 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
3433 !strconcat(OpcodeStr,
3434 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3435 []>, OpSize;
3436// FIXME:
3437// There's an AssertZext in the way of writing the store pattern
3438// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
3439}
3440
3441defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
3442
3443
3444/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
3445multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
Evan Chengc2054be2008-03-26 08:11:49 +00003446 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003447 (ins VR128:$src1, i32i8imm:$src2),
3448 !strconcat(OpcodeStr,
3449 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3450 [(set GR32:$dst,
3451 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003452 def mr : SS4AIi8<opc, MRMDestMem, (outs),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003453 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
3454 !strconcat(OpcodeStr,
3455 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3456 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
3457 addr:$dst)]>, OpSize;
Nate Begeman58057962008-02-09 01:38:08 +00003458}
3459
Nate Begemand77e59e2008-02-11 04:19:36 +00003460defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
Nate Begeman58057962008-02-09 01:38:08 +00003461
Nate Begemand77e59e2008-02-11 04:19:36 +00003462
Evan Cheng6c249332008-03-24 21:52:23 +00003463/// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or memory
3464/// destination
Nate Begemand77e59e2008-02-11 04:19:36 +00003465multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
Evan Chengc2054be2008-03-26 08:11:49 +00003466 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003467 (ins VR128:$src1, i32i8imm:$src2),
3468 !strconcat(OpcodeStr,
3469 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Dan Gohman788db592008-04-16 02:32:24 +00003470 [(set GR32:$dst,
3471 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
Evan Cheng6c249332008-03-24 21:52:23 +00003472 OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003473 def mr : SS4AIi8<opc, MRMDestMem, (outs),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003474 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
3475 !strconcat(OpcodeStr,
3476 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Evan Cheng6c249332008-03-24 21:52:23 +00003477 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003478 addr:$dst)]>, OpSize;
Nate Begeman58057962008-02-09 01:38:08 +00003479}
3480
Nate Begemand77e59e2008-02-11 04:19:36 +00003481defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003482
Evan Cheng3ea4d672008-03-05 08:19:16 +00003483let Constraints = "$src1 = $dst" in {
Nate Begemand77e59e2008-02-11 04:19:36 +00003484 multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
Evan Cheng78d00612008-03-14 07:39:27 +00003485 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003486 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
3487 !strconcat(OpcodeStr,
3488 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3489 [(set VR128:$dst,
3490 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003491 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003492 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
3493 !strconcat(OpcodeStr,
3494 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3495 [(set VR128:$dst,
3496 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
3497 imm:$src3))]>, OpSize;
3498 }
3499}
3500
3501defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
3502
Evan Cheng3ea4d672008-03-05 08:19:16 +00003503let Constraints = "$src1 = $dst" in {
Nate Begemand77e59e2008-02-11 04:19:36 +00003504 multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
Evan Cheng78d00612008-03-14 07:39:27 +00003505 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003506 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
3507 !strconcat(OpcodeStr,
3508 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3509 [(set VR128:$dst,
3510 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
3511 OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003512 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003513 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
3514 !strconcat(OpcodeStr,
3515 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3516 [(set VR128:$dst,
3517 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
3518 imm:$src3)))]>, OpSize;
3519 }
3520}
3521
3522defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
3523
Evan Cheng3ea4d672008-03-05 08:19:16 +00003524let Constraints = "$src1 = $dst" in {
Nate Begemand77e59e2008-02-11 04:19:36 +00003525 multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
Evan Cheng78d00612008-03-14 07:39:27 +00003526 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003527 (ins VR128:$src1, FR32:$src2, i32i8imm:$src3),
3528 !strconcat(OpcodeStr,
3529 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3530 [(set VR128:$dst,
3531 (X86insrtps VR128:$src1, FR32:$src2, imm:$src3))]>, OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003532 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003533 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
3534 !strconcat(OpcodeStr,
3535 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3536 [(set VR128:$dst,
3537 (X86insrtps VR128:$src1, (loadf32 addr:$src2),
3538 imm:$src3))]>, OpSize;
3539 }
3540}
3541
Evan Chengc2054be2008-03-26 08:11:49 +00003542defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
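// The insertps immediate encodes a source element select (bits 7:6), a
// destination element select (bits 5:4) and a zero mask (bits 3:0).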
Nate Begeman0dd3cb52008-03-16 21:14:46 +00003543
3544let Defs = [EFLAGS] in {
3545def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
3546 "ptest \t{$src2, $src1|$src1, $src2}", []>, OpSize;
3547def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
3548 "ptest \t{$src2, $src1|$src1, $src2}", []>, OpSize;
3549}
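// ptest produces no register result: it sets ZF when the AND of its operands
// is all zeros and CF when the AND-NOT is all zeros, which is why only EFLAGS
// is defined.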
3550
3551def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3552 "movntdqa\t{$src, $dst|$dst, $src}",
3553 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>, OpSize;
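// movntdqa is a non-temporal (streaming) aligned 128-bit load, intended for
// reading from write-combining memory without polluting the caches.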