//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad]>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
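// Usage note: the scalar-intrinsic instruction forms later in this file
// (for example the SSrm_Int members of basic_sse1_fp_binop_rm) take a whole
// XMM operand, so they match their memory operand as sse_load_f32:$src2
// rather than as a plain scalar load; that is what allows a scalar load to
// be folded into the 'ss' form of the instruction.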

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;
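// Usage note: these aligned fragments are what separate the aligned and
// unaligned packed moves defined below; for example MOVAPSrm matches
// (alignedloadv4f32 addr:$src) while MOVUPSrm matches the ordinary
// (loadv4f32 addr:$src).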

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.
// FIXME: Actually implement support for targets that don't require the
// alignment. This probably wants a subtarget predicate.
def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8-byte alignment for MMX reads is not actually required.
def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 8;
  return false;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def PSxLDQ_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getValue() >> 3);
}]>;
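// Worked example for the transform above (assuming, as the shift by 3
// suggests, that the DAG-level amount is a bit count while the instruction
// immediate is a byte count): a 64-bit shift amount becomes
// getI32Imm(64 >> 3), i.e. an 8-byte shift immediate.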

def SSE_CC_imm : SDNodeXForm<cond, [{
  unsigned Val;
  switch (N->get()) {
  default: Val = 0; assert(0 && "Unexpected CondCode"); break;
  case ISD::SETOEQ: Val = 0; break;
  case ISD::SETOLT: Val = 1; break;
  case ISD::SETOLE: Val = 2; break;
  case ISD::SETUO:  Val = 3; break;
  case ISD::SETONE: Val = 4; break;
  case ISD::SETOGE: Val = 5; break;
  case ISD::SETOGT: Val = 6; break;
  case ISD::SETO:   Val = 7; break;
  }
  return getI8Imm(Val);
}]>;
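// Note: the values above follow the SSE CMPPS/CMPSS predicate-immediate
// encoding (0=eq, 1=lt, 2=le, 3=unord, 4=neq, 5=nlt, 6=nle, 7=ord); for
// example a vsetcc with SETOLT is matched by the CMPPSrri/CMPPSrmi patterns
// later in this file and prints as cmpltps.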

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
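// Worked example (per the PSHUFD/SHUFPS immediate encoding, where bits
// [2i+1:2i] select the source element for result element i): the splat mask
// <0,0,0,0> encodes as 0x00 and the identity mask <0,1,2,3> encodes as 0xE4
// (0b11100100).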

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

def SSE_splat_mask : PatLeaf<(build_vector), [{
  return X86::isSplatMask(N);
}], SHUFFLE_get_shuf_imm>;

def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
  return X86::isSplatLoMask(N);
}]>;

def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;

def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPS_v_undef_Mask(N);
}]>;

def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHPMask(N);
}]>;

def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLPMask(N);
}]>;

def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSHDUPMask(N);
}]>;

def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSLDUPMask(N);
}]>;

def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;

def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFHWMask(N);
}], SHUFFLE_get_pshufhw_imm>;

def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFLWMask(N);
}], SHUFFLE_get_pshuflw_imm>;

def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;


//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation. These are expanded
// by the scheduler into a branch sequence.
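// Rough sketch (an assumption about the custom inserter, which lives in the
// X86 target lowering code rather than here): each CMOV_* pseudo is
// typically rewritten after selection into a conditional branch over a copy
// plus a PHI merging $t and $f, rather than into a real cmov or blend
// instruction.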
let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}

//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "cvtsi2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "cvtsi2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si
                                           (load addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi
                                           (load addr:$src)))]>;
def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi
                                           (load addr:$src)))]>;
let Constraints = "$src1 = $dst" in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              VR64:$src2))]>;
  def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              (load addr:$src2)))]>;
}

// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si (load addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              (loadi32 addr:$src2)))]>;
}

// Comparison instructions
let Constraints = "$src1 = $dst" in {
let neverHasSideEffects = 1 in
  def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
let neverHasSideEffects = 1, mayLoad = 1 in
  def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases to match intrinsics which expect XMM operand(s).
let Constraints = "$src1 = $dst" in {
  def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                         VR128:$src, imm:$cc))]>;
  def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                         (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
                       (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
                        (implicit EFLAGS)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
                       (ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
                      (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), VR128:$src2),
                       (implicit EFLAGS)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
                      (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases of packed SSE1 instructions for scalar use. These all have names
// that start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
               Requires<[HasSSE1]>, TB, OpSize;

// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
let neverHasSideEffects = 1 in
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
let isSimpleLoad = 1 in
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
  def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
  def FsORPSrr  : PSI<0x56, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "orps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
  def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}

def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fand FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsORPSrm  : PSI<0x56, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                    "orps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86for FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fxor FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
let neverHasSideEffects = 1 in {
def FsANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;

let mayLoad = 1 in
def FsANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;
}
}

/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F32Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
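// Illustrative expansion: 'defm ADD' above instantiates six records,
// ADDSSrr, ADDSSrm, ADDPSrr, ADDPSrm, ADDSSrr_Int and ADDSSrm_Int; for
// example ADDSSrr is effectively
//   def ADDSSrr : SSI<0x58, MRMSrcReg, (outs FR32:$dst),
//                     (ins FR32:$src1, FR32:$src2),
//                     "addss\t{$src2, $dst|$dst, $src2}",
//                     [(set FR32:$dst, (fadd FR32:$src1, FR32:$src2))]>;
// with isCommutable = 1 and the "$src1 = $dst" constraint applied.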

/// sse1_fp_binop_rm - Other SSE1 binops
///
/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            bit Commutable = 0> {

  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, (load addr:$src2)))]>;
}
}

defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse_max_ss, int_x86_sse_max_ps>;
defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse_min_ss, int_x86_sse_min_ps>;
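// Illustrative expansion: 'defm MAX' above yields eight records (MAXSSrr,
// MAXSSrm, MAXPSrr, MAXPSrm, MAXSSrr_Int, MAXSSrm_Int, MAXPSrr_Int and
// MAXPSrm_Int); the extra MAXPSrr_Int/MAXPSrm_Int pair is what this
// multiclass adds over basic_sse1_fp_binop_rm, since max/min do not map onto
// a C operator and so keep a full-vector intrinsic form (int_x86_sse_max_ps).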

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions

// Move Instructions
let neverHasSideEffects = 1 in
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;

let neverHasSideEffects = 1 in
def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1 in
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS load and store
let isSimpleLoad = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;

let Constraints = "$src1 = $dst" in {
  let AddedComplexity = 20 in {
    def MOVLPSrm : PSI<0x12, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movlps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1,
                                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                                 MOVLP_shuffle_mask)))]>;
    def MOVHPSrm : PSI<0x16, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movhps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1,
                                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                                 MOVHP_shuffle_mask)))]>;
  } // AddedComplexity
} // Constraints = "$src1 = $dst"


def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle
                                         (bc_v2f64 (v4f32 VR128:$src)), (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let AddedComplexity = 15 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movlhps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHP_shuffle_mask)))]>;

def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movhlps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHLPS_shuffle_mask)))]>;
} // AddedComplexity
} // Constraints = "$src1 = $dst"



// Arithmetic

/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode,
                           Intrinsic F32Int,
                           Intrinsic V4F32Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;

  // Vector intrinsic operation, reg
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (load addr:$src)))]>;
}

// Square root.
defm SQRT : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
                            int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
                             int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
defm RCP : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
                           int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
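// Note on the refinement mentioned above (standard Newton-Raphson steps, not
// anything defined in this file): rcpps/rsqrtps return only about 12 bits of
// precision, so callers wanting near single precision typically apply one
// iteration such as
//   x1 = x0 * (2.0 - a * x0)                // refine x ~= 1/a
//   y1 = y0 * (1.5 - 0.5 * a * y0 * y0)     // refine y ~= 1/sqrt(a)
// where x0/y0 are the hardware estimates.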
821
822// Logical
Evan Cheng3ea4d672008-03-05 08:19:16 +0000823let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000824 let isCommutable = 1 in {
825 def ANDPSrr : PSI<0x54, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000826 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000827 "andps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000828 [(set VR128:$dst, (v2i64
829 (and VR128:$src1, VR128:$src2)))]>;
830 def ORPSrr : PSI<0x56, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000831 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000832 "orps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000833 [(set VR128:$dst, (v2i64
834 (or VR128:$src1, VR128:$src2)))]>;
835 def XORPSrr : PSI<0x57, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000836 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000837 "xorps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000838 [(set VR128:$dst, (v2i64
839 (xor VR128:$src1, VR128:$src2)))]>;
840 }
841
842 def ANDPSrm : PSI<0x54, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000843 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000844 "andps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000845 [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
846 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000847 def ORPSrm : PSI<0x56, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000848 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000849 "orps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000850 [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
851 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000852 def XORPSrm : PSI<0x57, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000853 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000854 "xorps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000855 [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
856 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000857 def ANDNPSrr : PSI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000858 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000859 "andnps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000860 [(set VR128:$dst,
861 (v2i64 (and (xor VR128:$src1,
862 (bc_v2i64 (v4i32 immAllOnesV))),
863 VR128:$src2)))]>;
864 def ANDNPSrm : PSI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000865 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000866 "andnps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000867 [(set VR128:$dst,
Evan Cheng8e92cd12007-07-19 23:34:10 +0000868 (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000869 (bc_v2i64 (v4i32 immAllOnesV))),
Evan Cheng8e92cd12007-07-19 23:34:10 +0000870 (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000871}
872
Evan Cheng3ea4d672008-03-05 08:19:16 +0000873let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000874 def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
Nate Begeman061db5f2008-05-12 20:34:32 +0000875 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
876 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
877 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
878 VR128:$src, imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000879 def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
Nate Begeman061db5f2008-05-12 20:34:32 +0000880 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
881 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
882 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
883 (load addr:$src), imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000884}
Nate Begeman061db5f2008-05-12 20:34:32 +0000885def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), VR128:$src2, cond:$cc)),
886 (CMPPSrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
887def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), (memop addr:$src2), cond:$cc)),
888 (CMPPSrmi VR128:$src1, addr:$src2, (SSE_CC_imm cond:$cc))>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000889
890// Shuffle and unpack instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +0000891let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000892 let isConvertibleToThreeAddress = 1 in // Convert to pshufd
893 def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000894 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000895 VR128:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +0000896 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000897 [(set VR128:$dst,
898 (v4f32 (vector_shuffle
899 VR128:$src1, VR128:$src2,
900 SHUFP_shuffle_mask:$src3)))]>;
901 def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000902 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000903 f128mem:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +0000904 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000905 [(set VR128:$dst,
906 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000907 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000908 SHUFP_shuffle_mask:$src3)))]>;
909
910 let AddedComplexity = 10 in {
911 def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000912 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000913 "unpckhps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000914 [(set VR128:$dst,
915 (v4f32 (vector_shuffle
916 VR128:$src1, VR128:$src2,
917 UNPCKH_shuffle_mask)))]>;
918 def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000919 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000920 "unpckhps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000921 [(set VR128:$dst,
922 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000923 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000924 UNPCKH_shuffle_mask)))]>;
925
926 def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000927 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000928 "unpcklps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000929 [(set VR128:$dst,
930 (v4f32 (vector_shuffle
931 VR128:$src1, VR128:$src2,
932 UNPCKL_shuffle_mask)))]>;
933 def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000934 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000935 "unpcklps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000936 [(set VR128:$dst,
937 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000938 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000939 UNPCKL_shuffle_mask)))]>;
940 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +0000941} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000942
943// Mask creation
Evan Chengb783fa32007-07-19 01:14:50 +0000944def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000945 "movmskps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000946 [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000947def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000948 "movmskpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000949 [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
950
Evan Chengd1d68072008-03-08 00:58:38 +0000951// Prefetch intrinsic.
952def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
953 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
954def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
955 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
956def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
957 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
958def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
959 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
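// Note (explanatory, added for clarity): the (i32 N) operand in the prefetch
// patterns above is the locality hint of the llvm.prefetch intrinsic; 3
// (keep in all cache levels) selects prefetcht0, 2 and 1 select prefetcht1
// and prefetcht2, and 0 (non-temporal) selects prefetchnta.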
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000960
961// Non-temporal stores
Evan Chengb783fa32007-07-19 01:14:50 +0000962def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000963 "movntps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000964 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
965
966// Store fence
Evan Chengb783fa32007-07-19 01:14:50 +0000967def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000968
969// MXCSR register
Evan Chengb783fa32007-07-19 01:14:50 +0000970def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000971 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000972def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
Dan Gohman91888f02007-07-31 20:11:57 +0000973 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000974
975// Alias instructions that map zero vector to pxor / xorp* for sse.
Chris Lattner17dab4a2008-01-10 05:45:39 +0000976let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +0000977def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +0000978 "xorps\t$dst, $dst",
Chris Lattnere6aa3862007-11-25 00:24:49 +0000979 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000980
Evan Chenga15896e2008-03-12 07:02:50 +0000981let Predicates = [HasSSE1] in {
982 def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
983 def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
984 def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
985 def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
986 def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
987}
988
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000989// FR32 to 128-bit vector conversion.
Evan Chengb783fa32007-07-19 01:14:50 +0000990def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000991 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000992 [(set VR128:$dst,
993 (v4f32 (scalar_to_vector FR32:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000994def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000995 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000996 [(set VR128:$dst,
997 (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;
998
999// FIXME: may not be able to eliminate this movss with coalescing because the
1000// src and dest register classes are different. We really want to write this pattern
1001// like this:
1002// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
1003// (f32 FR32:$src)>;
Evan Chengb783fa32007-07-19 01:14:50 +00001004def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001005 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001006 [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
1007 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001008def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001009 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001010 [(store (f32 (vector_extract (v4f32 VR128:$src),
1011 (iPTR 0))), addr:$dst)]>;
1012
1013
1014// Move to the lower bits of a VR128, leaving the upper bits alone.
1015// Three-operand (but two-address) aliases.
Evan Cheng3ea4d672008-03-05 08:19:16 +00001016let Constraints = "$src1 = $dst" in {
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001017let neverHasSideEffects = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001018 def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001019 (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001020 "movss\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001021
1022 let AddedComplexity = 15 in
1023 def MOVLPSrr : SSI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001024 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001025 "movss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001026 [(set VR128:$dst,
1027 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
1028 MOVL_shuffle_mask)))]>;
1029}
1030
1031// Move to the lower bits of a VR128, zeroing the upper bits.
1032// Loading from memory automatically zeroes the upper bits.
1033let AddedComplexity = 20 in
Evan Chengb783fa32007-07-19 01:14:50 +00001034def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001035 "movss\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00001036 [(set VR128:$dst, (v4f32 (X86vzmovl (v4f32 (scalar_to_vector
Evan Cheng40ee6e52008-05-08 00:57:18 +00001037 (loadf32 addr:$src))))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001038
Evan Cheng056afe12008-05-20 18:24:47 +00001039def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
Evan Cheng40ee6e52008-05-08 00:57:18 +00001040 (MOVZSS2PSrm addr:$src)>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001041
1042//===----------------------------------------------------------------------===//
1043// SSE2 Instructions
1044//===----------------------------------------------------------------------===//
1045
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001046// Move Instructions
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001047let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001048def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001049 "movsd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001050let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001051def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001052 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001053 [(set FR64:$dst, (loadf64 addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001054def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001055 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001056 [(store FR64:$src, addr:$dst)]>;
1057
1058// Conversion instructions
Evan Chengb783fa32007-07-19 01:14:50 +00001059def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001060 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001061 [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001062def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001063 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001064 [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001065def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001066 "cvtsd2ss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001067 [(set FR32:$dst, (fround FR64:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001068def CVTSD2SSrm : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001069 "cvtsd2ss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001070 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001071def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001072 "cvtsi2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001073 [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001074def CVTSI2SDrm : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001075 "cvtsi2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001076 [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
1077
1078// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001079def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001080 "cvtss2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001081 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1082 Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001083def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001084 "cvtss2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001085 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1086 Requires<[HasSSE2]>;
1087
1088// Match intrinsics which expect XMM operand(s).
Evan Chengb783fa32007-07-19 01:14:50 +00001089def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001090 "cvtsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001091 [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001092def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001093 "cvtsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001094 [(set GR32:$dst, (int_x86_sse2_cvtsd2si
1095 (load addr:$src)))]>;
1096
Dale Johannesen1fbb4a52007-10-30 22:15:38 +00001097// Match intrinsics which expect MM and XMM operand(s).
1098def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1099 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1100 [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
1101def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1102 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1103 [(set VR64:$dst, (int_x86_sse_cvtpd2pi
1104 (load addr:$src)))]>;
1105def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1106 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1107 [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
1108def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1109 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1110 [(set VR64:$dst, (int_x86_sse_cvttpd2pi
1111 (load addr:$src)))]>;
1112def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
1113 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1114 [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
1115def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1116 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1117 [(set VR128:$dst, (int_x86_sse_cvtpi2pd
1118 (load addr:$src)))]>;
1119
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001120// Aliases for intrinsics
Evan Chengb783fa32007-07-19 01:14:50 +00001121def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001122 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001123 [(set GR32:$dst,
1124 (int_x86_sse2_cvttsd2si VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001125def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001126 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001127 [(set GR32:$dst, (int_x86_sse2_cvttsd2si
1128 (load addr:$src)))]>;
1129
1130// Comparison instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +00001131let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
Evan Cheng653c7ac2007-12-20 19:57:09 +00001132 def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001133 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001134 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001135let mayLoad = 1 in
Evan Cheng653c7ac2007-12-20 19:57:09 +00001136 def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001137 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001138 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001139}
1140
Evan Cheng950aac02007-09-25 01:57:46 +00001141let Defs = [EFLAGS] in {
Evan Chengb783fa32007-07-19 01:14:50 +00001142def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001143 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001144 [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001145def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001146 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001147 [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
Evan Cheng950aac02007-09-25 01:57:46 +00001148 (implicit EFLAGS)]>;
1149}
1150
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001151// Aliases to match intrinsics which expect XMM operand(s).
Evan Cheng3ea4d672008-03-05 08:19:16 +00001152let Constraints = "$src1 = $dst" in {
Evan Cheng653c7ac2007-12-20 19:57:09 +00001153 def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001154 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001155 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001156 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1157 VR128:$src, imm:$cc))]>;
Evan Cheng653c7ac2007-12-20 19:57:09 +00001158 def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001159 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001160 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001161 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1162 (load addr:$src), imm:$cc))]>;
1163}
1164
Evan Cheng950aac02007-09-25 01:57:46 +00001165let Defs = [EFLAGS] in {
Evan Chengb783fa32007-07-19 01:14:50 +00001166def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001167 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001168 [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1169 (implicit EFLAGS)]>;
1170def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001171 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001172 [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
1173 (implicit EFLAGS)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001174
Evan Chengb783fa32007-07-19 01:14:50 +00001175def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001176 "comisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001177 [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1178 (implicit EFLAGS)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001179def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001180 "comisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001181 [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
Evan Cheng950aac02007-09-25 01:57:46 +00001182 (implicit EFLAGS)]>;
1183} // Defs = EFLAGS]
1184
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001185// Aliases of packed SSE2 instructions for scalar use. These all have names that
1186// start with 'Fs'.
1187
1188// Alias instructions that map fld0 to pxor for sse.
Chris Lattner17dab4a2008-01-10 05:45:39 +00001189let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001190def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +00001191 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001192 Requires<[HasSSE2]>, TB, OpSize;
1193
1194// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
1195// disregarded.
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001196let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001197def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001198 "movapd\t{$src, $dst|$dst, $src}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001199
1200// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
1201// disregarded.
Chris Lattner1a1932c2008-01-06 23:38:27 +00001202let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001203def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001204 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman11821702007-07-27 17:16:43 +00001205 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001206
1207// Alias bitwise logical operations using SSE logical ops on packed FP values.
Evan Cheng3ea4d672008-03-05 08:19:16 +00001208let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001209let isCommutable = 1 in {
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001210 def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
1211 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001212 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001213 [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001214 def FsORPDrr : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
1215 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001216 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001217 [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001218 def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
1219 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001220 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001221 [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
1222}
1223
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001224def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
1225 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001226 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001227 [(set FR64:$dst, (X86fand FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001228 (memopfsf64 addr:$src2)))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001229def FsORPDrm : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
1230 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001231 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001232 [(set FR64:$dst, (X86for FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001233 (memopfsf64 addr:$src2)))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001234def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
1235 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001236 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001237 [(set FR64:$dst, (X86fxor FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001238 (memopfsf64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001239
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001240let neverHasSideEffects = 1 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001241def FsANDNPDrr : PDI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001242 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001243 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001244let mayLoad = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001245def FsANDNPDrm : PDI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001246 (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001247 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001248}
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001249}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001250
1251/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
1252///
1253/// In addition, we also have a special variant of the scalar form here to
1254/// represent the associated intrinsic operation. This form is unlike the
1255/// plain scalar form, in that it takes an entire vector (instead of a scalar)
1256/// and leaves the top elements undefined.
1257///
1258/// These three forms can each be reg+reg or reg+mem, so there are a total of
1259/// six "instructions".
1260///
Evan Cheng3ea4d672008-03-05 08:19:16 +00001261let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001262multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1263 SDNode OpNode, Intrinsic F64Int,
1264 bit Commutable = 0> {
1265 // Scalar operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001266 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001267 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001268 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1269 let isCommutable = Commutable;
1270 }
1271
1272 // Scalar operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001273 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001274 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001275 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1276
1277 // Vector operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001278 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001279 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001280 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1281 let isCommutable = Commutable;
1282 }
1283
1284 // Vector operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001285 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001286 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001287 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001288
1289 // Intrinsic operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001290 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001291 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001292 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1293 let isCommutable = Commutable;
1294 }
1295
1296 // Intrinsic operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001297 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001298 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001299 [(set VR128:$dst, (F64Int VR128:$src1,
1300 sse_load_f64:$src2))]>;
1301}
1302}
1303
1304// Arithmetic instructions
1305defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
1306defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
1307defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
1308defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
1309
1310/// sse2_fp_binop_rm - Other SSE2 binops
1311///
1312/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
1313/// instructions for a full-vector intrinsic form. Operations that map
1314/// onto C operators don't use this form since they just use the plain
1315/// vector form instead of having a separate vector intrinsic form.
1316///
1317/// This provides a total of eight "instructions".
1318///
Evan Cheng3ea4d672008-03-05 08:19:16 +00001319let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001320multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1321 SDNode OpNode,
1322 Intrinsic F64Int,
1323 Intrinsic V2F64Int,
1324 bit Commutable = 0> {
1325
1326 // Scalar operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001327 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001328 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001329 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1330 let isCommutable = Commutable;
1331 }
1332
1333 // Scalar operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001334 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001335 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001336 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1337
1338 // Vector operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001339 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001340 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001341 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1342 let isCommutable = Commutable;
1343 }
1344
1345 // Vector operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001346 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001347 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001348 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001349
1350 // Intrinsic operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001351 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001352 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001353 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1354 let isCommutable = Commutable;
1355 }
1356
1357 // Intrinsic operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001358 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001359 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001360 [(set VR128:$dst, (F64Int VR128:$src1,
1361 sse_load_f64:$src2))]>;
1362
1363 // Vector intrinsic operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001364 def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001365 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001366 [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
1367 let isCommutable = Commutable;
1368 }
1369
1370 // Vector intrinsic operation, reg+mem.
Dan Gohmanc747be52007-08-02 21:06:40 +00001371 def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001372 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001373 [(set VR128:$dst, (V2F64Int VR128:$src1, (load addr:$src2)))]>;
1374}
1375}
1376
1377defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
1378 int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
1379defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
1380 int_x86_sse2_min_sd, int_x86_sse2_min_pd>;
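// For illustration only: MAX and MIN use sse2_fp_binop_rm, so each defm above
// is expected to expand into eight records, e.g. for MAX:
//   MAXSDrr, MAXSDrm, MAXPDrr, MAXPDrm,
//   MAXSDrr_Int, MAXSDrm_Int, MAXPDrr_Int, MAXPDrm_Int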
1381
1382//===----------------------------------------------------------------------===//
1383// SSE packed FP Instructions
1384
1385// Move Instructions
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001386let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001387def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001388 "movapd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001389let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001390def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001391 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001392 [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001393
Evan Chengb783fa32007-07-19 01:14:50 +00001394def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001395 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001396 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001397
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001398let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001399def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001400 "movupd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001401let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001402def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001403 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001404 [(set VR128:$dst, (loadv2f64 addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001405def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001406 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001407 [(store (v2f64 VR128:$src), addr:$dst)]>;
1408
1409// Intrinsic forms of MOVUPD load and store
Evan Chengb783fa32007-07-19 01:14:50 +00001410def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001411 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001412 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001413def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001414 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001415 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001416
Evan Cheng3ea4d672008-03-05 08:19:16 +00001417let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001418 let AddedComplexity = 20 in {
1419 def MOVLPDrm : PDI<0x12, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001420 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001421 "movlpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001422 [(set VR128:$dst,
1423 (v2f64 (vector_shuffle VR128:$src1,
1424 (scalar_to_vector (loadf64 addr:$src2)),
1425 MOVLP_shuffle_mask)))]>;
1426 def MOVHPDrm : PDI<0x16, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001427 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001428 "movhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001429 [(set VR128:$dst,
1430 (v2f64 (vector_shuffle VR128:$src1,
1431 (scalar_to_vector (loadf64 addr:$src2)),
1432 MOVHP_shuffle_mask)))]>;
1433 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +00001434} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001435
Evan Chengb783fa32007-07-19 01:14:50 +00001436def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001437 "movlpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001438 [(store (f64 (vector_extract (v2f64 VR128:$src),
1439 (iPTR 0))), addr:$dst)]>;
1440
1441// v2f64 extract of element 1 is always custom lowered to unpack high to low
1442// and extract element 0, so the non-store version isn't too horrible.
Evan Chengb783fa32007-07-19 01:14:50 +00001443def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001444 "movhpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001445 [(store (f64 (vector_extract
1446 (v2f64 (vector_shuffle VR128:$src, (undef),
1447 UNPCKH_shuffle_mask)), (iPTR 0))),
1448 addr:$dst)]>;
1449
1450// SSE2 instructions without OpSize prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001451def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001452 "cvtdq2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001453 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1454 TB, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001455def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001456 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1457 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1458 (bitconvert (memopv2i64 addr:$src))))]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001459 TB, Requires<[HasSSE2]>;
1460
1461// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001462def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001463 "cvtdq2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001464 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1465 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001466def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001467 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1468 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1469 (bitconvert (memopv2i64 addr:$src))))]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001470 XS, Requires<[HasSSE2]>;
1471
Evan Chengb783fa32007-07-19 01:14:50 +00001472def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001473 "cvtps2dq\t{$src, $dst|$dst, $src}",
1474 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001475def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001476 "cvtps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001477 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1478 (load addr:$src)))]>;
1479// SSE2 packed instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001480def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001481 "cvttps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001482 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
1483 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001484def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001485 "cvttps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001486 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1487 (load addr:$src)))]>,
1488 XS, Requires<[HasSSE2]>;
1489
1490// SSE2 packed instructions with XD prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001491def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001492 "cvtpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001493 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1494 XD, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001495def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001496 "cvtpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001497 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1498 (load addr:$src)))]>,
1499 XD, Requires<[HasSSE2]>;
1500
Evan Chengb783fa32007-07-19 01:14:50 +00001501def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001502 "cvttpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001503 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
Evan Cheng14c97c32008-03-14 07:46:48 +00001504def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001505 "cvttpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001506 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1507 (load addr:$src)))]>;
1508
1509// SSE2 instructions without OpSize prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001510def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001511 "cvtps2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001512 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1513 TB, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001514def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001515 "cvtps2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001516 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1517 (load addr:$src)))]>,
1518 TB, Requires<[HasSSE2]>;
1519
Evan Chengb783fa32007-07-19 01:14:50 +00001520def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001521 "cvtpd2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001522 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001523def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001524 "cvtpd2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001525 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1526 (load addr:$src)))]>;
1527
1528// Match intrinsics which expect XMM operand(s).
1529// Aliases for intrinsics
Evan Cheng3ea4d672008-03-05 08:19:16 +00001530let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001531def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001532 (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001533 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001534 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1535 GR32:$src2))]>;
1536def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001537 (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001538 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001539 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1540 (loadi32 addr:$src2)))]>;
1541def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001542 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001543 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001544 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1545 VR128:$src2))]>;
1546def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001547 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001548 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001549 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1550 (load addr:$src2)))]>;
1551def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001552 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001553 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001554 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1555 VR128:$src2))]>, XS,
1556 Requires<[HasSSE2]>;
1557def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001558 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001559 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001560 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1561 (load addr:$src2)))]>, XS,
1562 Requires<[HasSSE2]>;
1563}
1564
1565// Arithmetic
1566
1567/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
1568///
1569/// In addition, we also have a special variant of the scalar form here to
1570/// represent the associated intrinsic operation. This form is unlike the
1571/// plain scalar form, in that it takes an entire vector (instead of a
1572/// scalar) and leaves the top elements undefined.
1573///
1574/// And, we have a special variant form for a full-vector intrinsic form.
1575///
1576/// These four forms can each have a reg or a mem operand, so there are a
1577/// total of eight "instructions".
1578///
1579multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
1580 SDNode OpNode,
1581 Intrinsic F64Int,
1582 Intrinsic V2F64Int,
1583 bit Commutable = 0> {
1584 // Scalar operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001585 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001586 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001587 [(set FR64:$dst, (OpNode FR64:$src))]> {
1588 let isCommutable = Commutable;
1589 }
1590
1591 // Scalar operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001592 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001593 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001594 [(set FR64:$dst, (OpNode (load addr:$src)))]>;
1595
1596 // Vector operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001597 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001598 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001599 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
1600 let isCommutable = Commutable;
1601 }
1602
1603 // Vector operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001604 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001605 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001606 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001607
1608 // Intrinsic operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001609 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001610 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001611 [(set VR128:$dst, (F64Int VR128:$src))]> {
1612 let isCommutable = Commutable;
1613 }
1614
1615 // Intrinsic operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001616 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001617 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001618 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1619
1620 // Vector intrinsic operation, reg
Evan Chengb783fa32007-07-19 01:14:50 +00001621 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001622 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001623 [(set VR128:$dst, (V2F64Int VR128:$src))]> {
1624 let isCommutable = Commutable;
1625 }
1626
1627 // Vector intrinsic operation, mem
Dan Gohmanc747be52007-08-02 21:06:40 +00001628 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001629 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001630 [(set VR128:$dst, (V2F64Int (load addr:$src)))]>;
1631}
1632
1633// Square root.
1634defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
1635 int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
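// For illustration only: the SQRT defm above is expected to expand
// sse2_fp_unop_rm into eight records: SQRTSDr, SQRTSDm, SQRTPDr, SQRTPDm,
// SQRTSDr_Int, SQRTSDm_Int, SQRTPDr_Int and SQRTPDm_Int.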
1636
1637// There is no f64 version of the reciprocal approximation instructions.
1638
1639// Logical
Evan Cheng3ea4d672008-03-05 08:19:16 +00001640let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001641 let isCommutable = 1 in {
1642 def ANDPDrr : PDI<0x54, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001643 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001644 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001645 [(set VR128:$dst,
1646 (and (bc_v2i64 (v2f64 VR128:$src1)),
1647 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1648 def ORPDrr : PDI<0x56, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001649 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001650 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001651 [(set VR128:$dst,
1652 (or (bc_v2i64 (v2f64 VR128:$src1)),
1653 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1654 def XORPDrr : PDI<0x57, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001655 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001656 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001657 [(set VR128:$dst,
1658 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1659 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1660 }
1661
1662 def ANDPDrm : PDI<0x54, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001663 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001664 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001665 [(set VR128:$dst,
1666 (and (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001667 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001668 def ORPDrm : PDI<0x56, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001669 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001670 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001671 [(set VR128:$dst,
1672 (or (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001673 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001674 def XORPDrm : PDI<0x57, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001675 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001676 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001677 [(set VR128:$dst,
1678 (xor (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001679 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001680 def ANDNPDrr : PDI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001681 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001682 "andnpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001683 [(set VR128:$dst,
1684 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1685 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1686 def ANDNPDrm : PDI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001687 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001688 "andnpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001689 [(set VR128:$dst,
1690 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001691 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001692}
1693
Evan Cheng3ea4d672008-03-05 08:19:16 +00001694let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001695 def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
Evan Cheng14c97c32008-03-14 07:46:48 +00001696 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
1697 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1698 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
Nate Begeman061db5f2008-05-12 20:34:32 +00001699 VR128:$src, imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001700 def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
Evan Cheng14c97c32008-03-14 07:46:48 +00001701 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
1702 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1703 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
Nate Begeman061db5f2008-05-12 20:34:32 +00001704 (load addr:$src), imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001705}
Nate Begeman061db5f2008-05-12 20:34:32 +00001706def : Pat<(v2i64 (vsetcc (v2f64 VR128:$src1), VR128:$src2, cond:$cc)),
1707 (CMPPDrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
1708def : Pat<(v2i64 (vsetcc (v2f64 VR128:$src1), (memop addr:$src2), cond:$cc)),
1709 (CMPPDrmi VR128:$src1, addr:$src2, (SSE_CC_imm cond:$cc))>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001710
1711// Shuffle and unpack instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +00001712let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001713 def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
Evan Cheng14c97c32008-03-14 07:46:48 +00001714 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
1715 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1716 [(set VR128:$dst, (v2f64 (vector_shuffle
1717 VR128:$src1, VR128:$src2,
1718 SHUFP_shuffle_mask:$src3)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001719 def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001720 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001721 f128mem:$src2, i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00001722 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001723 [(set VR128:$dst,
1724 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001725 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001726 SHUFP_shuffle_mask:$src3)))]>;
1727
1728 let AddedComplexity = 10 in {
1729 def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001730 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001731 "unpckhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001732 [(set VR128:$dst,
1733 (v2f64 (vector_shuffle
1734 VR128:$src1, VR128:$src2,
1735 UNPCKH_shuffle_mask)))]>;
1736 def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001737 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001738 "unpckhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001739 [(set VR128:$dst,
1740 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001741 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001742 UNPCKH_shuffle_mask)))]>;
1743
1744 def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001745 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001746 "unpcklpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001747 [(set VR128:$dst,
1748 (v2f64 (vector_shuffle
1749 VR128:$src1, VR128:$src2,
1750 UNPCKL_shuffle_mask)))]>;
1751 def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001752 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001753 "unpcklpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001754 [(set VR128:$dst,
1755 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001756 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001757 UNPCKL_shuffle_mask)))]>;
1758 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +00001759} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001760
1761
1762//===----------------------------------------------------------------------===//
1763// SSE integer instructions
1764
1765// Move Instructions
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001766let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001767def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001768 "movdqa\t{$src, $dst|$dst, $src}", []>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001769let isSimpleLoad = 1, mayLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001770def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001771 "movdqa\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001772 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001773let mayStore = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001774def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001775 "movdqa\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001776 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001777let isSimpleLoad = 1, mayLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001778def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001779 "movdqu\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001780 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001781 XS, Requires<[HasSSE2]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001782let mayStore = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001783def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001784 "movdqu\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001785 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001786 XS, Requires<[HasSSE2]>;
1787
Dan Gohman4a4f1512007-07-18 20:23:34 +00001788// Intrinsic forms of MOVDQU load and store
Chris Lattner1a1932c2008-01-06 23:38:27 +00001789let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001790def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001791 "movdqu\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001792 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
1793 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001794def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001795 "movdqu\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001796 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
1797 XS, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001798
Evan Cheng88004752008-03-05 08:11:27 +00001799let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001800
1801multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
1802 bit Commutable = 0> {
Evan Chengb783fa32007-07-19 01:14:50 +00001803 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001804 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001805 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
1806 let isCommutable = Commutable;
1807 }
Evan Chengb783fa32007-07-19 01:14:50 +00001808 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001809 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001810 [(set VR128:$dst, (IntId VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00001811 (bitconvert (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001812}
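// As a reference for the multiclass above, a defm such as
//   defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb", int_x86_sse2_padds_b, 1>;
// expands to roughly the following register-register and register-memory
// forms (a sketch, not literal tblgen output):
//   def PADDSBrr : PDI<0xEC, MRMSrcReg, (outs VR128:$dst),
//                      (ins VR128:$src1, VR128:$src2),
//                      "paddsb\t{$src2, $dst|$dst, $src2}",
//                      [(set VR128:$dst,
//                            (int_x86_sse2_padds_b VR128:$src1, VR128:$src2))]>;
//   def PADDSBrm : PDI<0xEC, MRMSrcMem, (outs VR128:$dst),
//                      (ins VR128:$src1, i128mem:$src2),
//                      "paddsb\t{$src2, $dst|$dst, $src2}",
//                      [(set VR128:$dst,
//                            (int_x86_sse2_padds_b VR128:$src1,
//                                 (bitconvert (memopv2i64 addr:$src2))))]>;
// with isCommutable set on the rr form and "$src1 = $dst" tied by the
// enclosing Constraints block.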
1813
Evan Chengf90f8f82008-05-03 00:52:09 +00001814multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
1815 string OpcodeStr,
1816 Intrinsic IntId, Intrinsic IntId2> {
1817 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1818 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1819 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
1820 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
1821 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1822 [(set VR128:$dst, (IntId VR128:$src1,
1823 (bitconvert (memopv2i64 addr:$src2))))]>;
1824 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
1825 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
1826 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
1827}
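// For reference, PDI_binop_rmi_int gives each shift three forms: a shift by
// the low 64 bits of another XMM register (rr), a shift by a 128-bit memory
// operand (rm), and a shift by an 8-bit immediate (ri).  That is why it takes
// two intrinsics (e.g. int_x86_sse2_psll_w for the rr/rm forms and
// int_x86_sse2_pslli_w for the ri form) and two opcodes (e.g. 0xF1 plus 0x71
// with the /6 ModRM extension for psllw below).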
1828
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001829/// PDI_binop_rm - Simple SSE2 binary operator.
1830multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
1831 ValueType OpVT, bit Commutable = 0> {
Evan Chengb783fa32007-07-19 01:14:50 +00001832 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001833 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001834 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
1835 let isCommutable = Commutable;
1836 }
Evan Chengb783fa32007-07-19 01:14:50 +00001837 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001838 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001839 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00001840 (bitconvert (memopv2i64 addr:$src2)))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001841}
1842
1843/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
1844///
1845/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
1846/// to collapse (bitconvert VT to VT) into its operand.
1847///
1848multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
1849 bit Commutable = 0> {
Evan Chengb783fa32007-07-19 01:14:50 +00001850 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001851 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001852 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
1853 let isCommutable = Commutable;
1854 }
Evan Chengb783fa32007-07-19 01:14:50 +00001855 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001856 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001857 [(set VR128:$dst, (OpNode VR128:$src1,(memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001858}
1859
Evan Cheng3ea4d672008-03-05 08:19:16 +00001860} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001861
1862// 128-bit Integer Arithmetic
1863
1864defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
1865defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
1866defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
1867defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
1868
1869defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
1870defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
1871defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
1872defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
1873
1874defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
1875defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
1876defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
1877defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
1878
1879defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
1880defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
1881defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
1882defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
1883
1884defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
1885
1886defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
1887defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
1888defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
1889
1890defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
1891
1892defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
1893defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
1894
1895
1896defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
1897defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
1898defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
1899defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
 1900defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
1901
1902
Evan Chengf90f8f82008-05-03 00:52:09 +00001903defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
1904 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
1905defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
1906 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
1907defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
1908 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001909
Evan Chengf90f8f82008-05-03 00:52:09 +00001910defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
1911 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
1912defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
1913 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
Nate Begemanc2ca5f62008-05-13 17:52:09 +00001914defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
Evan Chengf90f8f82008-05-03 00:52:09 +00001915 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001916
Evan Chengf90f8f82008-05-03 00:52:09 +00001917defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
1918 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
Nate Begemand66fc342008-05-13 01:47:52 +00001919defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
Evan Chengf90f8f82008-05-03 00:52:09 +00001920 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001921
1922// 128-bit logical shifts.
Evan Cheng3ea4d672008-03-05 08:19:16 +00001923let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001924 def PSLLDQri : PDIi8<0x73, MRM7r,
Evan Chengb783fa32007-07-19 01:14:50 +00001925 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001926 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001927 def PSRLDQri : PDIi8<0x73, MRM3r,
Evan Chengb783fa32007-07-19 01:14:50 +00001928 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001929 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001930 // PSRADQri doesn't exist in SSE[1-3].
1931}
1932
1933let Predicates = [HasSSE2] in {
1934 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
1935 (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1936 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
1937 (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1938 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
1939 (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
1940}
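// Note: pslldq/psrldq shift the full 128-bit register by a *byte* count,
// shifting in zeros; e.g. "pslldq $4, %xmm0" moves every byte of %xmm0 up by
// four byte positions (32 bits).  The PSxLDQ_imm transform used above is
// defined elsewhere in this file and is assumed to rescale the intrinsic's
// shift amount into the byte count the instruction encoding expects.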
1941
1942// Logical
1943defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
1944defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
1945defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
1946
Evan Cheng3ea4d672008-03-05 08:19:16 +00001947let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001948 def PANDNrr : PDI<0xDF, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001949 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001950 "pandn\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001951 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
1952 VR128:$src2)))]>;
1953
1954 def PANDNrm : PDI<0xDF, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001955 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001956 "pandn\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001957 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
Dan Gohman7dc19012007-08-02 21:17:01 +00001958 (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001959}
1960
1961// SSE2 Integer comparison
1962defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
1963defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
1964defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
1965defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
1966defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
1967defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
1968
Nate Begeman78ca4f92008-05-12 23:09:43 +00001969def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), VR128:$src2, SETEQ)),
1970 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
1971def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), (memop addr:$src2), SETEQ)),
1972 (PCMPEQBrm VR128:$src1, addr:$src2)>;
1973def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), VR128:$src2, SETEQ)),
1974 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
1975def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), (memop addr:$src2), SETEQ)),
1976 (PCMPEQWrm VR128:$src1, addr:$src2)>;
1977def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETEQ)),
1978 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
1979def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), (memop addr:$src2), SETEQ)),
1980 (PCMPEQDrm VR128:$src1, addr:$src2)>;
1981
1982def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), VR128:$src2, SETGT)),
1983 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
1984def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), (memop addr:$src2), SETGT)),
1985 (PCMPGTBrm VR128:$src1, addr:$src2)>;
1986def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), VR128:$src2, SETGT)),
1987 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
1988def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), (memop addr:$src2), SETGT)),
1989 (PCMPGTWrm VR128:$src1, addr:$src2)>;
1990def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETGT)),
1991 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
1992def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), (memop addr:$src2), SETGT)),
1993 (PCMPGTDrm VR128:$src1, addr:$src2)>;
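// Note: the patterns above map the target-independent vsetcc node onto the
// SSE2 integer compares.  Only SETEQ and SETGT are handled because
// pcmpeq{b,w,d} and pcmpgt{b,w,d} are the only integer vector compares SSE2
// provides; other predicates are presumably lowered in terms of these (by
// swapping operands or inverting the result) before instruction selection.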
1994
1995
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001996// Pack instructions
1997defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
1998defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
1999defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2000
2001// Shuffle and unpack instructions
2002def PSHUFDri : PDIi8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002003 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002004 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002005 [(set VR128:$dst, (v4i32 (vector_shuffle
2006 VR128:$src1, (undef),
2007 PSHUFD_shuffle_mask:$src2)))]>;
2008def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002009 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002010 "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002011 [(set VR128:$dst, (v4i32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002012 (bc_v4i32(memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002013 (undef),
2014 PSHUFD_shuffle_mask:$src2)))]>;
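// Note: the pshufd immediate packs four 2-bit source-element selectors, one
// per destination dword (bits 1:0 pick dst[0], ..., bits 7:6 pick dst[3]).
// For example, 0x1B (0b00011011) reverses the four dwords and 0x00 broadcasts
// element 0; the PSHUFD_shuffle_mask fragment is assumed to encode the
// matched shuffle into this form.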
2015
2016// SSE2 with ImmT == Imm8 and XS prefix.
2017def PSHUFHWri : Ii8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002018 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002019 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002020 [(set VR128:$dst, (v8i16 (vector_shuffle
2021 VR128:$src1, (undef),
2022 PSHUFHW_shuffle_mask:$src2)))]>,
2023 XS, Requires<[HasSSE2]>;
2024def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002025 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002026 "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002027 [(set VR128:$dst, (v8i16 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002028 (bc_v8i16 (memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002029 (undef),
2030 PSHUFHW_shuffle_mask:$src2)))]>,
2031 XS, Requires<[HasSSE2]>;
2032
2033// SSE2 with ImmT == Imm8 and XD prefix.
2034def PSHUFLWri : Ii8<0x70, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002035 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002036 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002037 [(set VR128:$dst, (v8i16 (vector_shuffle
2038 VR128:$src1, (undef),
2039 PSHUFLW_shuffle_mask:$src2)))]>,
2040 XD, Requires<[HasSSE2]>;
2041def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002042 (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002043 "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002044 [(set VR128:$dst, (v8i16 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002045 (bc_v8i16 (memopv2i64 addr:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002046 (undef),
2047 PSHUFLW_shuffle_mask:$src2)))]>,
2048 XD, Requires<[HasSSE2]>;
2049
2050
Evan Cheng3ea4d672008-03-05 08:19:16 +00002051let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002052 def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002053 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002054 "punpcklbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002055 [(set VR128:$dst,
2056 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2057 UNPCKL_shuffle_mask)))]>;
2058 def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002059 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002060 "punpcklbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002061 [(set VR128:$dst,
2062 (v16i8 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002063 (bc_v16i8 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002064 UNPCKL_shuffle_mask)))]>;
2065 def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002066 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002067 "punpcklwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002068 [(set VR128:$dst,
2069 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2070 UNPCKL_shuffle_mask)))]>;
2071 def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002072 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002073 "punpcklwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002074 [(set VR128:$dst,
2075 (v8i16 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002076 (bc_v8i16 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002077 UNPCKL_shuffle_mask)))]>;
2078 def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002079 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002080 "punpckldq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002081 [(set VR128:$dst,
2082 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2083 UNPCKL_shuffle_mask)))]>;
2084 def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002085 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002086 "punpckldq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002087 [(set VR128:$dst,
2088 (v4i32 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002089 (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002090 UNPCKL_shuffle_mask)))]>;
2091 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002092 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002093 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002094 [(set VR128:$dst,
2095 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2096 UNPCKL_shuffle_mask)))]>;
2097 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002098 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002099 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002100 [(set VR128:$dst,
2101 (v2i64 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002102 (memopv2i64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002103 UNPCKL_shuffle_mask)))]>;
2104
2105 def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002106 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002107 "punpckhbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002108 [(set VR128:$dst,
2109 (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
2110 UNPCKH_shuffle_mask)))]>;
2111 def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002112 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002113 "punpckhbw\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002114 [(set VR128:$dst,
2115 (v16i8 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002116 (bc_v16i8 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002117 UNPCKH_shuffle_mask)))]>;
2118 def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002119 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002120 "punpckhwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002121 [(set VR128:$dst,
2122 (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
2123 UNPCKH_shuffle_mask)))]>;
2124 def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002125 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002126 "punpckhwd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002127 [(set VR128:$dst,
2128 (v8i16 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002129 (bc_v8i16 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002130 UNPCKH_shuffle_mask)))]>;
2131 def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002132 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002133 "punpckhdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002134 [(set VR128:$dst,
2135 (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2136 UNPCKH_shuffle_mask)))]>;
2137 def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002138 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002139 "punpckhdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002140 [(set VR128:$dst,
2141 (v4i32 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002142 (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002143 UNPCKH_shuffle_mask)))]>;
2144 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002145 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002146 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002147 [(set VR128:$dst,
2148 (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2149 UNPCKH_shuffle_mask)))]>;
2150 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002151 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002152 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002153 [(set VR128:$dst,
2154 (v2i64 (vector_shuffle VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00002155 (memopv2i64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002156 UNPCKH_shuffle_mask)))]>;
2157}
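// Note: the punpckl* forms interleave the low halves of the two sources and
// the punpckh* forms interleave the high halves.  For bytes, punpcklbw
// produces [d0,s0,d1,s1,...,d7,s7] and punpckhbw produces [d8,s8,...,d15,s15];
// the UNPCKL/UNPCKH shuffle mask fragments above are assumed to describe
// exactly these element orders for each width.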
2158
2159// Extract / Insert
2160def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002161 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002162 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002163 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
Nate Begemand77e59e2008-02-11 04:19:36 +00002164 imm:$src2))]>;
Evan Cheng3ea4d672008-03-05 08:19:16 +00002165let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002166 def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002167 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002168 GR32:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00002169 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002170 [(set VR128:$dst,
Nate Begemand77e59e2008-02-11 04:19:36 +00002171 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002172 def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002173 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002174 i16mem:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00002175 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Nate Begemand77e59e2008-02-11 04:19:36 +00002176 [(set VR128:$dst,
2177 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2178 imm:$src3))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002179}
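// Note: pextrw/pinsrw operate on 16-bit lanes.  For example,
// "pextrw $3, %xmm0, %eax" zero-extends lane 3 of %xmm0 into %eax, and
// "pinsrw $3, %eax, %xmm0" replaces lane 3 of %xmm0 with the low 16 bits of
// %eax, which is why the memory form above only loads (extloadi16) a single
// 16-bit value.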
2180
2181// Mask creation
Evan Chengb783fa32007-07-19 01:14:50 +00002182def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002183 "pmovmskb\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002184 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2185
2186// Conditional store
Evan Cheng6e4d1d92007-09-11 19:55:27 +00002187let Uses = [EDI] in
Evan Chengb783fa32007-07-19 01:14:50 +00002188def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
Dan Gohman91888f02007-07-31 20:11:57 +00002189 "maskmovdqu\t{$mask, $src|$src, $mask}",
Evan Cheng6e4d1d92007-09-11 19:55:27 +00002190 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002191
2192// Non-temporal stores
Evan Chengb783fa32007-07-19 01:14:50 +00002193def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002194 "movntpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002195 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002196def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002197 "movntdq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002198 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002199def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002200 "movnti\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002201 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2202 TB, Requires<[HasSSE2]>;
2203
2204// Flush cache
Evan Chengb783fa32007-07-19 01:14:50 +00002205def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002206 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002207 TB, Requires<[HasSSE2]>;
2208
2209// Load, store, and memory fence
Evan Chengb783fa32007-07-19 01:14:50 +00002210def LFENCE : I<0xAE, MRM5m, (outs), (ins),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002211 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002212def MFENCE : I<0xAE, MRM6m, (outs), (ins),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002213 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
2214
Andrew Lenharth785610d2008-02-16 01:24:58 +00002215//TODO: custom lower this so as to never even generate the noop
2216def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
2217 (i8 0)), (NOOP)>;
2218def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
2219def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
2220def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
2221 (i8 1)), (MFENCE)>;
2222
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002223// Alias instruction that maps the all-ones vector to pcmpeqd for SSE.
Chris Lattner17dab4a2008-01-10 05:45:39 +00002224let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00002225 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +00002226 "pcmpeqd\t$dst, $dst",
Chris Lattnere6aa3862007-11-25 00:24:49 +00002227 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002228
2229// FR64 to 128-bit vector conversion.
Evan Chengb783fa32007-07-19 01:14:50 +00002230def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002231 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002232 [(set VR128:$dst,
2233 (v2f64 (scalar_to_vector FR64:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002234def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002235 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002236 [(set VR128:$dst,
2237 (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;
2238
Evan Chengb783fa32007-07-19 01:14:50 +00002239def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002240 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002241 [(set VR128:$dst,
2242 (v4i32 (scalar_to_vector GR32:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002243def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002244 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002245 [(set VR128:$dst,
2246 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2247
Evan Chengb783fa32007-07-19 01:14:50 +00002248def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002249 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002250 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2251
Evan Chengb783fa32007-07-19 01:14:50 +00002252def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002253 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002254 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2255
2256// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00002257def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002258 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002259 [(set VR128:$dst,
2260 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
2261 Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002262def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002263 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002264 [(store (i64 (vector_extract (v2i64 VR128:$src),
2265 (iPTR 0))), addr:$dst)]>;
2266
 2267// FIXME: we may not be able to eliminate this movsd when coalescing because
 2268// the src and dest register classes are different. We really want to write
 2269// this pattern like this:
 2270// def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
 2271//           (f64 FR64:$src)>;
Evan Chengb783fa32007-07-19 01:14:50 +00002272def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002273 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002274 [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
2275 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002276def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002277 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002278 [(store (f64 (vector_extract (v2f64 VR128:$src),
2279 (iPTR 0))), addr:$dst)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002280def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002281 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002282 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2283 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002284def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002285 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002286 [(store (i32 (vector_extract (v4i32 VR128:$src),
2287 (iPTR 0))), addr:$dst)]>;
2288
Evan Chengb783fa32007-07-19 01:14:50 +00002289def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002290 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002291 [(set GR32:$dst, (bitconvert FR32:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002292def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002293 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002294 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
2295
2296
2297// Move to lower bits of a VR128, leaving upper bits alone.
2298// Three operand (but two address) aliases.
Evan Cheng3ea4d672008-03-05 08:19:16 +00002299let Constraints = "$src1 = $dst" in {
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00002300 let neverHasSideEffects = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002301 def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002302 (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002303 "movsd\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002304
2305 let AddedComplexity = 15 in
2306 def MOVLPDrr : SDI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002307 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002308 "movsd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002309 [(set VR128:$dst,
2310 (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
2311 MOVL_shuffle_mask)))]>;
2312}
2313
 2314// Store / copy the lower 64 bits of an XMM register.
Evan Chengb783fa32007-07-19 01:14:50 +00002315def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002316 "movq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002317 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
2318
 2319// Move to the lower bits of a VR128, zeroing the upper bits.
 2320// Loading from memory automatically zeroes the upper bits.
Evan Chengd743a5f2008-05-10 00:59:18 +00002321let AddedComplexity = 20 in {
2322def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
2323 "movsd\t{$src, $dst|$dst, $src}",
2324 [(set VR128:$dst,
2325 (v2f64 (X86vzmovl (v2f64 (scalar_to_vector
2326 (loadf64 addr:$src))))))]>;
Evan Cheng40ee6e52008-05-08 00:57:18 +00002327
Evan Cheng056afe12008-05-20 18:24:47 +00002328def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
2329 (MOVZSD2PDrm addr:$src)>;
2330def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
Evan Chengd743a5f2008-05-10 00:59:18 +00002331 (MOVZSD2PDrm addr:$src)>;
Evan Chenge9b9c672008-05-09 21:53:03 +00002332def : Pat<(v2f64 (X86vzload addr:$src)), (MOVZSD2PDrm addr:$src)>;
Evan Chengd743a5f2008-05-10 00:59:18 +00002333}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002334
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002335// movd / movq to XMM register zero-extends
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002336let AddedComplexity = 15 in {
Evan Chengb783fa32007-07-19 01:14:50 +00002337def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002338 "movd\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00002339 [(set VR128:$dst, (v4i32 (X86vzmovl
Evan Cheng40ee6e52008-05-08 00:57:18 +00002340 (v4i32 (scalar_to_vector GR32:$src)))))]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002341// This is X86-64 only.
2342def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2343 "mov{d|q}\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00002344 [(set VR128:$dst, (v2i64 (X86vzmovl
Evan Cheng40ee6e52008-05-08 00:57:18 +00002345 (v2i64 (scalar_to_vector GR64:$src)))))]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002346}
2347
2348let AddedComplexity = 20 in {
Evan Chengb783fa32007-07-19 01:14:50 +00002349def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002350 "movd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002351 [(set VR128:$dst,
Evan Chenge9b9c672008-05-09 21:53:03 +00002352 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
Evan Cheng40ee6e52008-05-08 00:57:18 +00002353 (loadi32 addr:$src))))))]>;
Evan Cheng3ad16c42008-05-22 18:56:56 +00002354
2355def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
2356 (MOVZDI2PDIrm addr:$src)>;
2357def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2358 (MOVZDI2PDIrm addr:$src)>;
2359
Evan Chengb783fa32007-07-19 01:14:50 +00002360def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002361 "movq\t{$src, $dst|$dst, $src}",
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002362 [(set VR128:$dst,
Evan Chenge9b9c672008-05-09 21:53:03 +00002363 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
Evan Cheng40ee6e52008-05-08 00:57:18 +00002364 (loadi64 addr:$src))))))]>, XS,
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002365 Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002366
Evan Cheng3ad16c42008-05-22 18:56:56 +00002367def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
2368 (MOVZQI2PQIrm addr:$src)>;
2369def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
2370 (MOVZQI2PQIrm addr:$src)>;
Evan Chenge9b9c672008-05-09 21:53:03 +00002371def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
Evan Chengd743a5f2008-05-10 00:59:18 +00002372}
Evan Chenge9b9c672008-05-09 21:53:03 +00002373
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002374// Move from XMM to XMM and clear the upper 64 bits. Note: there is a bug in
 2375// the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
2376let AddedComplexity = 15 in
2377def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2378 "movq\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00002379 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002380 XS, Requires<[HasSSE2]>;
2381
Evan Cheng056afe12008-05-20 18:24:47 +00002382let AddedComplexity = 20 in {
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002383def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2384 "movq\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00002385 [(set VR128:$dst, (v2i64 (X86vzmovl
Evan Cheng056afe12008-05-20 18:24:47 +00002386 (loadv2i64 addr:$src))))]>,
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002387 XS, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002388
Evan Cheng056afe12008-05-20 18:24:47 +00002389def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
2390 (MOVZPQILo2PQIrm addr:$src)>;
2391}
2392
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002393//===----------------------------------------------------------------------===//
2394// SSE3 Instructions
2395//===----------------------------------------------------------------------===//
2396
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002397// Move Instructions
Evan Chengb783fa32007-07-19 01:14:50 +00002398def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002399 "movshdup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002400 [(set VR128:$dst, (v4f32 (vector_shuffle
2401 VR128:$src, (undef),
2402 MOVSHDUP_shuffle_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002403def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002404 "movshdup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002405 [(set VR128:$dst, (v4f32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002406 (memopv4f32 addr:$src), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002407 MOVSHDUP_shuffle_mask)))]>;
2408
Evan Chengb783fa32007-07-19 01:14:50 +00002409def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002410 "movsldup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002411 [(set VR128:$dst, (v4f32 (vector_shuffle
2412 VR128:$src, (undef),
2413 MOVSLDUP_shuffle_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002414def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002415 "movsldup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002416 [(set VR128:$dst, (v4f32 (vector_shuffle
Dan Gohman4a4f1512007-07-18 20:23:34 +00002417 (memopv4f32 addr:$src), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002418 MOVSLDUP_shuffle_mask)))]>;
2419
Evan Chengb783fa32007-07-19 01:14:50 +00002420def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002421 "movddup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002422 [(set VR128:$dst, (v2f64 (vector_shuffle
2423 VR128:$src, (undef),
2424 SSE_splat_lo_mask)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002425def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002426 "movddup\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002427 [(set VR128:$dst,
2428 (v2f64 (vector_shuffle
2429 (scalar_to_vector (loadf64 addr:$src)),
2430 (undef),
2431 SSE_splat_lo_mask)))]>;
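// Note: movddup duplicates the low double-precision element into both lanes,
// i.e. a source of [a, b] produces [a, a]; the register and memory forms
// above express this as a splat of element 0 via SSE_splat_lo_mask.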
2432
2433// Arithmetic
Evan Cheng3ea4d672008-03-05 08:19:16 +00002434let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002435 def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002436 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002437 "addsubps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002438 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2439 VR128:$src2))]>;
2440 def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002441 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002442 "addsubps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002443 [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
2444 (load addr:$src2)))]>;
2445 def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00002446 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002447 "addsubpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002448 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2449 VR128:$src2))]>;
2450 def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00002451 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002452 "addsubpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002453 [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
2454 (load addr:$src2)))]>;
2455}
2456
Evan Chengb783fa32007-07-19 01:14:50 +00002457def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00002458 "lddqu\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002459 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
2460
2461// Horizontal ops
2462class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002463 : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002464 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002465 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
2466class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002467 : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002468 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002469 [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (load addr:$src2))))]>;
2470class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002471 : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002472 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002473 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
2474class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
Evan Chengb783fa32007-07-19 01:14:50 +00002475 : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00002476 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002477 [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (load addr:$src2))))]>;
2478
Evan Cheng3ea4d672008-03-05 08:19:16 +00002479let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002480 def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2481 def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
2482 def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2483 def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
2484 def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2485 def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
2486 def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2487 def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
2488}
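// Note: the horizontal ops add or subtract adjacent pairs within each source.
// For example, "haddps %xmm1, %xmm0" computes
//   %xmm0 = [ x0+x1, x2+x3, y0+y1, y2+y3 ]
// where x is the old value of %xmm0 and y is %xmm1; haddpd/hsubpd do the same
// with one pair per source.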
2489
2490// Thread synchronization
Evan Chengb783fa32007-07-19 01:14:50 +00002491def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002492 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
Evan Chengb783fa32007-07-19 01:14:50 +00002493def MWAIT : I<0xC9, RawFrm, (outs), (ins), "mwait",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002494 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
2495
2496// vector_shuffle v1, <undef> <1, 1, 3, 3>
2497let AddedComplexity = 15 in
2498def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2499 MOVSHDUP_shuffle_mask)),
2500 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2501let AddedComplexity = 20 in
Dan Gohman4a4f1512007-07-18 20:23:34 +00002502def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002503 MOVSHDUP_shuffle_mask)),
2504 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
2505
2506// vector_shuffle v1, <undef> <0, 0, 2, 2>
2507let AddedComplexity = 15 in
2508 def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2509 MOVSLDUP_shuffle_mask)),
2510 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
2511let AddedComplexity = 20 in
Dan Gohman4a4f1512007-07-18 20:23:34 +00002512 def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002513 MOVSLDUP_shuffle_mask)),
2514 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
2515
2516//===----------------------------------------------------------------------===//
2517// SSSE3 Instructions
2518//===----------------------------------------------------------------------===//
2519
Bill Wendling98680292007-08-10 06:22:27 +00002520/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002521multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
2522 Intrinsic IntId64, Intrinsic IntId128> {
2523 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
2524 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2525 [(set VR64:$dst, (IntId64 VR64:$src))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002526
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002527 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
2528 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2529 [(set VR64:$dst,
2530 (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;
2531
2532 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2533 (ins VR128:$src),
2534 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2535 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2536 OpSize;
2537
2538 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2539 (ins i128mem:$src),
2540 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2541 [(set VR128:$dst,
2542 (IntId128
2543 (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002544}
2545
Bill Wendling98680292007-08-10 06:22:27 +00002546/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002547multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
2548 Intrinsic IntId64, Intrinsic IntId128> {
2549 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2550 (ins VR64:$src),
2551 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2552 [(set VR64:$dst, (IntId64 VR64:$src))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002553
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002554 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2555 (ins i64mem:$src),
2556 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2557 [(set VR64:$dst,
2558 (IntId64
2559 (bitconvert (memopv4i16 addr:$src))))]>;
2560
2561 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2562 (ins VR128:$src),
2563 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2564 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2565 OpSize;
2566
2567 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2568 (ins i128mem:$src),
2569 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2570 [(set VR128:$dst,
2571 (IntId128
2572 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
Bill Wendling98680292007-08-10 06:22:27 +00002573}
2574
2575/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002576multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
2577 Intrinsic IntId64, Intrinsic IntId128> {
2578 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2579 (ins VR64:$src),
2580 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2581 [(set VR64:$dst, (IntId64 VR64:$src))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002582
Nate Begeman9a58b8a2008-02-09 23:46:37 +00002583 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2584 (ins i64mem:$src),
2585 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2586 [(set VR64:$dst,
2587 (IntId64
2588 (bitconvert (memopv2i32 addr:$src))))]>;
2589
2590 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2591 (ins VR128:$src),
2592 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2593 [(set VR128:$dst, (IntId128 VR128:$src))]>,
2594 OpSize;
2595
2596 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2597 (ins i128mem:$src),
2598 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
2599 [(set VR128:$dst,
2600 (IntId128
2601 (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
Bill Wendling98680292007-08-10 06:22:27 +00002602}
2603
2604defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
2605 int_x86_ssse3_pabs_b,
2606 int_x86_ssse3_pabs_b_128>;
2607defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
2608 int_x86_ssse3_pabs_w,
2609 int_x86_ssse3_pabs_w_128>;
2610defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
2611 int_x86_ssse3_pabs_d,
2612 int_x86_ssse3_pabs_d_128>;
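// Note: pabsb/pabsw/pabsd compute the per-element absolute value of their
// single source, e.g. pabsw on [-5, 7, ...] yields [5, 7, ...]; the VR64 and
// VR128 forms above differ only in vector width.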
2613
2614/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
Evan Cheng3ea4d672008-03-05 08:19:16 +00002615let Constraints = "$src1 = $dst" in {
Bill Wendling98680292007-08-10 06:22:27 +00002616 multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
2617 Intrinsic IntId64, Intrinsic IntId128,
2618 bit Commutable = 0> {
2619 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2620 (ins VR64:$src1, VR64:$src2),
2621 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2622 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2623 let isCommutable = Commutable;
2624 }
2625 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2626 (ins VR64:$src1, i64mem:$src2),
2627 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2628 [(set VR64:$dst,
2629 (IntId64 VR64:$src1,
2630 (bitconvert (memopv8i8 addr:$src2))))]>;
2631
2632 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2633 (ins VR128:$src1, VR128:$src2),
2634 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2635 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2636 OpSize {
2637 let isCommutable = Commutable;
2638 }
2639 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2640 (ins VR128:$src1, i128mem:$src2),
2641 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2642 [(set VR128:$dst,
2643 (IntId128 VR128:$src1,
2644 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
2645 }
2646}
2647
2648/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
Evan Cheng3ea4d672008-03-05 08:19:16 +00002649let Constraints = "$src1 = $dst" in {
Bill Wendling98680292007-08-10 06:22:27 +00002650 multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
2651 Intrinsic IntId64, Intrinsic IntId128,
2652 bit Commutable = 0> {
2653 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2654 (ins VR64:$src1, VR64:$src2),
2655 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2656 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2657 let isCommutable = Commutable;
2658 }
2659 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2660 (ins VR64:$src1, i64mem:$src2),
2661 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2662 [(set VR64:$dst,
2663 (IntId64 VR64:$src1,
2664 (bitconvert (memopv4i16 addr:$src2))))]>;
2665
2666 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2667 (ins VR128:$src1, VR128:$src2),
2668 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2669 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2670 OpSize {
2671 let isCommutable = Commutable;
2672 }
2673 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2674 (ins VR128:$src1, i128mem:$src2),
2675 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2676 [(set VR128:$dst,
2677 (IntId128 VR128:$src1,
2678 (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
2679 }
2680}
2681
2682/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
Evan Cheng3ea4d672008-03-05 08:19:16 +00002683let Constraints = "$src1 = $dst" in {
Bill Wendling98680292007-08-10 06:22:27 +00002684 multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
2685 Intrinsic IntId64, Intrinsic IntId128,
2686 bit Commutable = 0> {
2687 def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
2688 (ins VR64:$src1, VR64:$src2),
2689 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2690 [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
2691 let isCommutable = Commutable;
2692 }
2693 def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
2694 (ins VR64:$src1, i64mem:$src2),
2695 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2696 [(set VR64:$dst,
2697 (IntId64 VR64:$src1,
2698 (bitconvert (memopv2i32 addr:$src2))))]>;
2699
2700 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
2701 (ins VR128:$src1, VR128:$src2),
2702 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2703 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
2704 OpSize {
2705 let isCommutable = Commutable;
2706 }
2707 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
2708 (ins VR128:$src1, i128mem:$src2),
2709 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2710 [(set VR128:$dst,
2711 (IntId128 VR128:$src1,
2712 (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
2713 }
2714}
2715
2716defm PHADDW : SS3I_binop_rm_int_16<0x01, "phaddw",
2717 int_x86_ssse3_phadd_w,
2718 int_x86_ssse3_phadd_w_128, 1>;
2719defm PHADDD : SS3I_binop_rm_int_32<0x02, "phaddd",
2720 int_x86_ssse3_phadd_d,
2721 int_x86_ssse3_phadd_d_128, 1>;
2722defm PHADDSW : SS3I_binop_rm_int_16<0x03, "phaddsw",
2723 int_x86_ssse3_phadd_sw,
2724 int_x86_ssse3_phadd_sw_128, 1>;
2725defm PHSUBW : SS3I_binop_rm_int_16<0x05, "phsubw",
2726 int_x86_ssse3_phsub_w,
2727 int_x86_ssse3_phsub_w_128>;
2728defm PHSUBD : SS3I_binop_rm_int_32<0x06, "phsubd",
2729 int_x86_ssse3_phsub_d,
2730 int_x86_ssse3_phsub_d_128>;
2731defm PHSUBSW : SS3I_binop_rm_int_16<0x07, "phsubsw",
2732 int_x86_ssse3_phsub_sw,
2733 int_x86_ssse3_phsub_sw_128>;
2734defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
2735 int_x86_ssse3_pmadd_ub_sw,
2736 int_x86_ssse3_pmadd_ub_sw_128, 1>;
2737defm PMULHRSW : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
2738 int_x86_ssse3_pmul_hr_sw,
2739 int_x86_ssse3_pmul_hr_sw_128, 1>;
2740defm PSHUFB : SS3I_binop_rm_int_8 <0x00, "pshufb",
2741 int_x86_ssse3_pshuf_b,
2742 int_x86_ssse3_pshuf_b_128>;
2743defm PSIGNB : SS3I_binop_rm_int_8 <0x08, "psignb",
2744 int_x86_ssse3_psign_b,
2745 int_x86_ssse3_psign_b_128>;
2746defm PSIGNW : SS3I_binop_rm_int_16<0x09, "psignw",
2747 int_x86_ssse3_psign_w,
2748 int_x86_ssse3_psign_w_128>;
 2749defm PSIGND : SS3I_binop_rm_int_32<0x0A, "psignd",
2750 int_x86_ssse3_psign_d,
2751 int_x86_ssse3_psign_d_128>;
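// Note, as a reading aid for the defms above: phaddw adds adjacent 16-bit
// pairs ("phaddw %xmm1, %xmm0" gives [x0+x1, ..., x6+x7, y0+y1, ..., y6+y7]
// where x is the old %xmm0 and y is %xmm1); pmaddubsw multiplies unsigned
// bytes of the first operand by signed bytes of the second and adds adjacent
// products with signed saturation; and psignb/w/d negate, keep, or zero each
// element of the first operand according to the sign of the corresponding
// element of the second.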
2752
Evan Cheng3ea4d672008-03-05 08:19:16 +00002753let Constraints = "$src1 = $dst" in {
Bill Wendling1dc817c2007-08-10 09:00:17 +00002754 def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
2755 (ins VR64:$src1, VR64:$src2, i16imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002756 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002757 [(set VR64:$dst,
2758 (int_x86_ssse3_palign_r
2759 VR64:$src1, VR64:$src2,
2760 imm:$src3))]>;
 2761 def PALIGNR64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
2762 (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002763 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002764 [(set VR64:$dst,
2765 (int_x86_ssse3_palign_r
2766 VR64:$src1,
2767 (bitconvert (memopv2i32 addr:$src2)),
2768 imm:$src3))]>;

  def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
                           (ins VR128:$src1, VR128:$src2, i32imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           [(set VR128:$dst,
                              (int_x86_ssse3_palign_r_128
                                 VR128:$src1, VR128:$src2,
                                 imm:$src3))]>, OpSize;
  def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
                           (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
                           "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                           [(set VR128:$dst,
                              (int_x86_ssse3_palign_r_128
                                 VR128:$src1,
                                 (bitconvert (memopv4i32 addr:$src2)),
                                 imm:$src3))]>, OpSize;
}
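
// Note on palignr: the destination and source operands are treated as one
// concatenated value (destination in the high half), shifted right by $src3
// bytes, and the low half of the result is kept.  The immediate is therefore
// a byte count, not a per-element shuffle mask.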

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// extload f32 -> f64.  This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
let Predicates = [HasSSE2] in
 def : Pat<(fextend (loadf32 addr:$src)),
           (CVTSS2SDrm addr:$src)>;

// bit_convert
let Predicates = [HasSSE2] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
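
// These identity patterns make a bitconvert between any pair of 128-bit
// vector types resolve to the source register itself, so no instruction is
// ever emitted for the cast.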

// Move a scalar to the low element of an XMM register, zero-extending the
// upper elements (a movd to an XMM register zero-extends).
let AddedComplexity = 15 in {
// Zero a VR128, then do a MOVS{S|D} into the lower bits.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
          (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
          (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
          (MOVLPSrr (V_SET0), VR128:$src)>, Requires<[HasSSE2]>;
}

// Splat v2f64 / v2i64
let AddedComplexity = 10 in {
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
          (UNPCKLPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
          (UNPCKHPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
          (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
          (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}

// Special unary SHUFPSrri case.
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE1]>;
// Special unary SHUFPDrri case.
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (undef),
                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(vector_shuffle (bc_v4i32 (memopv4f32 addr:$src1)), (undef),
           SHUFP_unary_shuffle_mask:$sm),
          (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (v4i32 VR128:$src2),
                  PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)),
                  PSHUFD_binary_shuffle_mask:$sm)),
          (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special binary v2i64 shuffle cases using SHUFPDrri.
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                  SHUFP_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;
// Special unary SHUFPDrri case.
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (undef),
                  SHUFP_unary_shuffle_mask:$sm)),
          (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
      Requires<[HasSSE2]>;

// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  UNPCKL_v_undef_shuffle_mask)),
          (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
}

// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
let AddedComplexity = 10 in {
def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (UNPCKHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                  UNPCKH_v_undef_shuffle_mask)),
          (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
}

let AddedComplexity = 15 in {
// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHP_shuffle_mask)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVHLPS_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
                  MOVHLPS_v_undef_shuffle_mask)),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}

let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)), MOVLP_shuffle_mask)),
          (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVLP_shuffle_mask)),
          (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)), MOVHP_shuffle_mask)),
          (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
                  MOVHP_shuffle_mask)),
          (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
}

let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVL_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                  MOVLP_shuffle_mask)),
          (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
}

// Set lowest element and zero upper elements.
let AddedComplexity = 15 in
def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc, VR128:$src,
                  MOVL_shuffle_mask)),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;

// FIXME: Temporary workaround since 2-wide shuffle is broken.
def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
          (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
          (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
          (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
          (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
      Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
          (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
          (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
          (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
          (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
          (PUNPCKLQDQrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;

// Some special case pandn patterns.
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  VR128:$src2)),
          (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;

def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
                  (memopv2i64 addr:$src2))),
          (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
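
// The three variants above exist because 'vnot' is canonicalized into an xor
// with an all-ones vector, and that all-ones constant can appear as a v4i32,
// v8i16, or v16i8 build vector underneath the bitconvert.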

// vector -> vector casts
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;

// Use movaps / movups for SSE integer load / store (one byte shorter).
def : Pat<(alignedloadv4i32 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(loadv4i32 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
def : Pat<(alignedloadv2i64 addr:$src),
          (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
def : Pat<(loadv2i64 addr:$src),
          (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;

def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
          (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v2i64 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v4i32 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v8i16 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(store (v16i8 VR128:$src), addr:$dst),
          (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
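
// Why these are one byte shorter: movdqa/movdqu carry a 0x66 operand-size
// prefix that the movaps/movups encodings do not, and the two families are
// functionally interchangeable for plain loads and stores of integer vectors.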

//===----------------------------------------------------------------------===//
// SSE4.1 Instructions
//===----------------------------------------------------------------------===//

multiclass sse41_fp_unop_rm<bits<8> opcss, bits<8> opcps,
                            bits<8> opcsd, bits<8> opcpd,
                            string OpcodeStr,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            Intrinsic F64Int,
                            Intrinsic V2F64Int> {
  // Intrinsic operation, reg.
  def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set VR128:$dst, (F32Int VR128:$src1, imm:$src2))]>,
                        OpSize;

  // Intrinsic operation, mem.
  def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
                        (outs VR128:$dst), (ins ssmem:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set VR128:$dst, (F32Int sse_load_f32:$src1, imm:$src2))]>,
                        OpSize;

  // Vector intrinsic operation, reg
  def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
                        OpSize;

  // Vector intrinsic operation, mem
  def PSm_Int : SS4AIi8<opcps, MRMSrcMem,
                        (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set VR128:$dst, (V4F32Int (load addr:$src1), imm:$src2))]>,
                        OpSize;

  // Intrinsic operation, reg.
  def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set VR128:$dst, (F64Int VR128:$src1, imm:$src2))]>,
                        OpSize;

  // Intrinsic operation, mem.
  def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
                        (outs VR128:$dst), (ins sdmem:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set VR128:$dst, (F64Int sse_load_f64:$src1, imm:$src2))]>,
                        OpSize;

  // Vector intrinsic operation, reg
  def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
                        OpSize;

  // Vector intrinsic operation, mem
  def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
                        (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        [(set VR128:$dst, (V2F64Int (load addr:$src1), imm:$src2))]>,
                        OpSize;
}

// FP round - roundss, roundps, roundsd, roundpd
defm ROUND : sse41_fp_unop_rm<0x0A, 0x08, 0x0B, 0x09, "round",
                              int_x86_sse41_round_ss, int_x86_sse41_round_ps,
                              int_x86_sse41_round_sd, int_x86_sse41_round_pd>;
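
// The ROUND defm above yields ROUNDSSr_Int/ROUNDSSm_Int, ROUNDPSr_Int/
// ROUNDPSm_Int, ROUNDSDr_Int/ROUNDSDm_Int and ROUNDPDr_Int/ROUNDPDm_Int; the
// i32i8imm operand is the SSE4.1 rounding-control byte passed straight
// through to the instruction.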

// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                       (IntId128
                        (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
                                         int_x86_sse41_phminposuw>;
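
// phminposuw scans the eight unsigned words of the source, writing the
// minimum value to the low word of the destination and its index to the next
// three bits, with the remaining bits cleared.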

/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                                Intrinsic IntId128, bit Commutable = 0> {
    def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                   OpSize {
      let isCommutable = Commutable;
    }
    def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst,
                      (IntId128 VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
  }
}

defm PCMPEQQ  : SS41I_binop_rm_int<0x29, "pcmpeqq",
                                   int_x86_sse41_pcmpeqq, 1>;
defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw",
                                   int_x86_sse41_packusdw, 0>;
defm PMINSB   : SS41I_binop_rm_int<0x38, "pminsb",
                                   int_x86_sse41_pminsb, 1>;
defm PMINSD   : SS41I_binop_rm_int<0x39, "pminsd",
                                   int_x86_sse41_pminsd, 1>;
defm PMINUD   : SS41I_binop_rm_int<0x3B, "pminud",
                                   int_x86_sse41_pminud, 1>;
defm PMINUW   : SS41I_binop_rm_int<0x3A, "pminuw",
                                   int_x86_sse41_pminuw, 1>;
defm PMAXSB   : SS41I_binop_rm_int<0x3C, "pmaxsb",
                                   int_x86_sse41_pmaxsb, 1>;
defm PMAXSD   : SS41I_binop_rm_int<0x3D, "pmaxsd",
                                   int_x86_sse41_pmaxsd, 1>;
defm PMAXUD   : SS41I_binop_rm_int<0x3F, "pmaxud",
                                   int_x86_sse41_pmaxud, 1>;
defm PMAXUW   : SS41I_binop_rm_int<0x3E, "pmaxuw",
                                   int_x86_sse41_pmaxuw, 1>;
defm PMULDQ   : SS41I_binop_rm_int<0x28, "pmuldq",
                                   int_x86_sse41_pmuldq, 1>;

/// SS41I_binop_patint - SSE 4.1 binary operator matched both from a generic
/// SDNode and from its intrinsic form
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                Intrinsic IntId128, bit Commutable = 0> {
    def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst, (OpNode (v4i32 VR128:$src1),
                                             VR128:$src2))]>, OpSize {
      let isCommutable = Commutable;
    }
    def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                       (ins VR128:$src1, VR128:$src2),
                       !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                       OpSize {
      let isCommutable = Commutable;
    }
    def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i128mem:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                   [(set VR128:$dst,
                      (OpNode VR128:$src1, (memopv4i32 addr:$src2)))]>, OpSize;
    def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                       (ins VR128:$src1, i128mem:$src2),
                       !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                       [(set VR128:$dst,
                          (IntId128 VR128:$src1, (memopv4i32 addr:$src2)))]>,
                       OpSize;
  }
}
defm PMULLD : SS41I_binop_patint<0x40, "pmulld", mul,
                                 int_x86_sse41_pmulld, 1>;
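
// PMULLD is given both forms on purpose: the 'mul' pattern lets a plain
// vector multiply of v4i32 select directly to pmulld when SSE4.1 is
// available, while the _int forms keep the explicit intrinsic working.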


/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
let Constraints = "$src1 = $dst" in {
  multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128, bit Commutable = 0> {
    def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
                      !strconcat(OpcodeStr,
                                 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                      [(set VR128:$dst,
                         (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
                      OpSize {
      let isCommutable = Commutable;
    }
    def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
                      !strconcat(OpcodeStr,
                                 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                      [(set VR128:$dst,
                         (IntId128 VR128:$src1,
                          (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
                      OpSize;
  }
}

defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps",
                                   int_x86_sse41_blendps, 0>;
defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd",
                                   int_x86_sse41_blendpd, 0>;
defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw",
                                   int_x86_sse41_pblendw, 0>;
defm DPPS    : SS41I_binop_rmi_int<0x40, "dpps",
                                   int_x86_sse41_dpps, 1>;
defm DPPD    : SS41I_binop_rmi_int<0x41, "dppd",
                                   int_x86_sse41_dppd, 1>;
defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw",
                                   int_x86_sse41_mpsadbw, 0>;


/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                               "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                               "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
                    [(set VR128:$dst,
                       (IntId VR128:$src1,
                        (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}

defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
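
// The blendv forms have an implicit third operand: Uses = [XMM0] pins the
// selector to xmm0, and each destination element is taken from the second
// source or left unchanged depending on the sign bit of the corresponding
// xmm0 element (byte-wise for pblendvb, element-wise for blendvps/blendvpd).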


multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                    (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
}

defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
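
// The pmovsx/pmovzx forms sign- or zero-extend packed elements taken from the
// low part of the source; the multiclass suffixes (_int8 here, _int4 and
// _int2 below) reflect how many bytes the memory form reads (i64mem, i32mem
// and i16mem respectively).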

multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                    (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
}

defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;

multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                    (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
}

defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;


/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                   OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;


/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
}

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;


/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32:$dst,
                      (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                           addr:$dst)]>, OpSize;
}

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;


/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32:$dst,
                      (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                   OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                           addr:$dst)]>, OpSize;
}

defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;

let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                        (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                                   imm:$src3))]>, OpSize;
  }
}

defm PINSRB : SS41I_insert8<0x20, "pinsrb">;

let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
                     OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                                          imm:$src3)))]>, OpSize;
  }
}

defm PINSRD : SS41I_insert32<0x22, "pinsrd">;

let Constraints = "$src1 = $dst" in {
  multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
    def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                     (ins VR128:$src1, FR32:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                        (X86insrtps VR128:$src1, FR32:$src2, imm:$src3))]>, OpSize;
    def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                     (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
                     !strconcat(OpcodeStr,
                                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                     [(set VR128:$dst,
                        (X86insrtps VR128:$src1, (loadf32 addr:$src2),
                                    imm:$src3))]>, OpSize;
  }
}

defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
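
// A note on the insertps immediate (as documented for SSE4.1): bits 7:6
// select the source element when the source is a register, bits 5:4 select
// the destination position, and bits 3:0 form a zero mask applied to the
// result.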

let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                    "ptest\t{$src2, $src1|$src1, $src2}", []>, OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
                    "ptest\t{$src2, $src1|$src1, $src2}", []>, OpSize;
}
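
// ptest only writes EFLAGS (hence the empty outs list): ZF is set when the
// AND of the two operands is all zeros, and CF is set when the AND of the
// second operand with the complement of the first is all zeros.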

def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
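
// movntdqa is the streaming (non-temporal hint) aligned 16-byte load; the
// pattern only forwards the int_x86_sse41_movntdqa intrinsic, so ordinary
// vector loads are not rewritten into it automatically.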