//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;

def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
                    [SDNPCommutative, SDNPAssociative]>;
def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                 [SDNPHasChain, SDNPMayLoad]>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
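
// Illustrative note (not an additional definition): the scalar-intrinsic
// memory forms later in this file select through these complex patterns, so a
// load that feeds only the low element can be folded, e.g. a pattern of the
// shape:
//   [(set VR128:$dst, (F32Int VR128:$src1, sse_load_f32:$src2))]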

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>;
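
// For illustration: the aligned fragments above drive the aligned move
// patterns later in this file (e.g. MOVAPSrm uses alignedloadv4f32), while the
// plain 'loadv4f32' fragment is left for the unaligned MOVUPSrm form.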

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.
// FIXME: Actually implement support for targets that don't require the
// alignment. This probably wants a subtarget predicate.
def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 16;
  return false;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;
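
// Illustrative use (a sketch of the pattern shape used throughout this file):
// the packed reg+mem forms below fold a 16-byte-aligned load directly, e.g.
//   [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]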

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
    return LD->getExtensionType() == ISD::NON_EXTLOAD &&
           LD->getAddressingMode() == ISD::UNINDEXED &&
           LD->getAlignment() >= 8;
  return false;
}]>;

def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def PSxLDQ_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getValue() >> 3);
}]>;

def SSE_CC_imm : SDNodeXForm<cond, [{
  unsigned Val;
  switch (N->get()) {
  default: Val = 0; assert(0 && "Unexpected CondCode"); break;
  case ISD::SETOEQ: Val = 0; break;
  case ISD::SETOLT: Val = 1; break;
  case ISD::SETOLE: Val = 2; break;
  case ISD::SETUO: Val = 3; break;
  case ISD::SETONE: Val = 4; break;
  case ISD::SETOGE: Val = 5; break;
  case ISD::SETOGT: Val = 6; break;
  case ISD::SETO: Val = 7; break;
  }
  return getI8Imm(Val);
}]>;
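
// For example (mirroring the patterns near the CMPPS definitions below), a
// vsetcc condition code is rewritten into the CMPPS/CMPSS immediate field:
//   def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), VR128:$src2, cond:$cc)),
//             (CMPPSrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
// so, e.g., SETOLT selects the "lt" encoding (immediate 1).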

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;
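
// The immediate packs one 2-bit source index per destination element, element
// 0 in the low bits. As a sketch, the reversing mask (3,2,1,0) would encode as
// 0x1B (binary 00 01 10 11), the same value a "pshufd $0x1b" reversal uses.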

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

def SSE_splat_mask : PatLeaf<(build_vector), [{
  return X86::isSplatMask(N);
}], SHUFFLE_get_shuf_imm>;

def SSE_splat_lo_mask : PatLeaf<(build_vector), [{
  return X86::isSplatLoMask(N);
}]>;

def MOVHLPS_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPSMask(N);
}]>;

def MOVHLPS_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHLPS_v_undef_Mask(N);
}]>;

def MOVHP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVHPMask(N);
}]>;

def MOVLP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLPMask(N);
}]>;

def MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

def MOVSHDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSHDUPMask(N);
}]>;

def MOVSLDUP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVSLDUPMask(N);
}]>;

def UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

def UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

def UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

def UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;

def PSHUFD_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFHW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFHWMask(N);
}], SHUFFLE_get_pshufhw_imm>;

def PSHUFLW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFLWMask(N);
}], SHUFFLE_get_pshuflw_imm>;

def SHUFP_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], SHUFFLE_get_shuf_imm>;

def SHUFP_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;

def PSHUFD_binary_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isSHUFPMask(N);
}], SHUFFLE_get_shuf_imm>;


//===----------------------------------------------------------------------===//
// SSE scalar FP Instructions
//===----------------------------------------------------------------------===//

// CMOV* - Used to implement the SSE SELECT DAG operation. Expanded by the
// scheduler into a branch sequence.
let Uses = [EFLAGS], usesCustomDAGSchedInserter = 1 in {
  def CMOV_FR32 : I<0, Pseudo,
                    (outs FR32:$dst), (ins FR32:$t, FR32:$f, i8imm:$cond),
                    "#CMOV_FR32 PSEUDO!",
                    [(set FR32:$dst, (X86cmov FR32:$t, FR32:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_FR64 : I<0, Pseudo,
                    (outs FR64:$dst), (ins FR64:$t, FR64:$f, i8imm:$cond),
                    "#CMOV_FR64 PSEUDO!",
                    [(set FR64:$dst, (X86cmov FR64:$t, FR64:$f, imm:$cond,
                                              EFLAGS))]>;
  def CMOV_V4F32 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V4F32 PSEUDO!",
                    [(set VR128:$dst,
                      (v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2F64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2F64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
  def CMOV_V2I64 : I<0, Pseudo,
                    (outs VR128:$dst), (ins VR128:$t, VR128:$f, i8imm:$cond),
                    "#CMOV_V2I64 PSEUDO!",
                    [(set VR128:$dst,
                      (v2i64 (X86cmov VR128:$t, VR128:$f, imm:$cond,
                                      EFLAGS)))]>;
}
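
// Expansion sketch (done in C++ by the custom DAG scheduler inserter referenced
// above, not in this file): each CMOV_* pseudo is lowered to a conditional
// branch around a copy, joined by a PHI of the two values, since there is no
// conditional-move instruction for SSE registers.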

//===----------------------------------------------------------------------===//
// SSE1 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
let neverHasSideEffects = 1 in
def MOVSSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(set FR32:$dst, (loadf32 addr:$src))]>;
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;

// Conversion instructions
def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint FR32:$src))]>;
def CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                      "cvttss2si\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (fp_to_sint (loadf32 addr:$src)))]>;
def CVTSI2SSrr : SSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "cvtsi2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (sint_to_fp GR32:$src))]>;
def CVTSI2SSrm : SSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "cvtsi2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (sint_to_fp (loadi32 addr:$src)))]>;

// Match intrinsics which expect XMM operand(s).
def Int_CVTSS2SIrr : SSI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si VR128:$src))]>;
def Int_CVTSS2SIrm : SSI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                         "cvtss2si\t{$src, $dst|$dst, $src}",
                         [(set GR32:$dst, (int_x86_sse_cvtss2si
                                           (load addr:$src)))]>;

// Match intrinsics which expect MM and XMM operand(s).
def Int_CVTPS2PIrr : PSI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi VR128:$src))]>;
def Int_CVTPS2PIrm : PSI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvtps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvtps2pi
                                           (load addr:$src)))]>;
def Int_CVTTPS2PIrr: PSI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi VR128:$src))]>;
def Int_CVTTPS2PIrm: PSI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                         "cvttps2pi\t{$src, $dst|$dst, $src}",
                         [(set VR64:$dst, (int_x86_sse_cvttps2pi
                                           (load addr:$src)))]>;
let Constraints = "$src1 = $dst" in {
  def Int_CVTPI2PSrr : PSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, VR64:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              VR64:$src2))]>;
  def Int_CVTPI2PSrm : PSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2),
                           "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtpi2ps VR128:$src1,
                                              (load addr:$src2)))]>;
}

// Aliases for intrinsics
def Int_CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si VR128:$src))]>;
def Int_CVTTSS2SIrm : SSI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f32mem:$src),
                          "cvttss2si\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst,
                            (int_x86_sse_cvttss2si (load addr:$src)))]>;

let Constraints = "$src1 = $dst" in {
  def Int_CVTSI2SSrr : SSI<0x2A, MRMSrcReg,
                           (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              GR32:$src2))]>;
  def Int_CVTSI2SSrm : SSI<0x2A, MRMSrcMem,
                           (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
                           "cvtsi2ss\t{$src2, $dst|$dst, $src2}",
                           [(set VR128:$dst, (int_x86_sse_cvtsi2ss VR128:$src1,
                                              (loadi32 addr:$src2)))]>;
}

// Comparison instructions
let Constraints = "$src1 = $dst" in {
let neverHasSideEffects = 1 in
  def CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs FR32:$dst), (ins FR32:$src1, FR32:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
let neverHasSideEffects = 1, mayLoad = 1 in
  def CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}", []>;
}

let Defs = [EFLAGS] in {
def UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs), (ins FR32:$src1, FR32:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, FR32:$src2), (implicit EFLAGS)]>;
def UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs), (ins FR32:$src1, f32mem:$src2),
                   "ucomiss\t{$src2, $src1|$src1, $src2}",
                   [(X86cmp FR32:$src1, (loadf32 addr:$src2)),
                    (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases to match intrinsics which expect XMM operand(s).
let Constraints = "$src1 = $dst" in {
  def Int_CMPSSrr : SSIi8<0xC2, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                         VR128:$src, imm:$cc))]>;
  def Int_CMPSSrm : SSIi8<0xC2, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src, SSECC:$cc),
                      "cmp${cc}ss\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse_cmp_ss VR128:$src1,
                                         (load addr:$src), imm:$cc))]>;
}

let Defs = [EFLAGS] in {
def Int_UCOMISSrr: PSI<0x2E, MRMSrcReg, (outs),
                       (ins VR128:$src1, VR128:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), VR128:$src2),
                        (implicit EFLAGS)]>;
def Int_UCOMISSrm: PSI<0x2E, MRMSrcMem, (outs),
                       (ins VR128:$src1, f128mem:$src2),
                       "ucomiss\t{$src2, $src1|$src1, $src2}",
                       [(X86ucomi (v4f32 VR128:$src1), (load addr:$src2)),
                        (implicit EFLAGS)]>;

def Int_COMISSrr: PSI<0x2F, MRMSrcReg, (outs),
                      (ins VR128:$src1, VR128:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), VR128:$src2),
                       (implicit EFLAGS)]>;
def Int_COMISSrm: PSI<0x2F, MRMSrcMem, (outs),
                      (ins VR128:$src1, f128mem:$src2),
                      "comiss\t{$src2, $src1|$src1, $src2}",
                      [(X86comi (v4f32 VR128:$src1), (load addr:$src2)),
                       (implicit EFLAGS)]>;
} // Defs = [EFLAGS]

// Aliases of packed SSE1 instructions for scalar use. These all have names that
// start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1 in
def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins),
                 "pxor\t$dst, $dst", [(set FR32:$dst, fp32imm0)]>,
                 Requires<[HasSSE1]>, TB, OpSize;

// Alias instruction to do FR32 reg-to-reg copy using movaps. Upper bits are
// disregarded.
let neverHasSideEffects = 1 in
def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                     "movaps\t{$src, $dst|$dst, $src}", []>;

// Alias instruction to load FR32 from f128mem using movaps. Upper bits are
// disregarded.
let isSimpleLoad = 1 in
def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let Constraints = "$src1 = $dst" in {
let isCommutable = 1 in {
  def FsANDPSrr : PSI<0x54, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "andps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fand FR32:$src1, FR32:$src2))]>;
  def FsORPSrr : PSI<0x56, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                     "orps\t{$src2, $dst|$dst, $src2}",
                     [(set FR32:$dst, (X86for FR32:$src1, FR32:$src2))]>;
  def FsXORPSrr : PSI<0x57, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                      "xorps\t{$src2, $dst|$dst, $src2}",
                      [(set FR32:$dst, (X86fxor FR32:$src1, FR32:$src2))]>;
}

def FsANDPSrm : PSI<0x54, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                    "andps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fand FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
def FsORPSrm : PSI<0x56, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                   "orps\t{$src2, $dst|$dst, $src2}",
                   [(set FR32:$dst, (X86for FR32:$src1,
                                     (memopfsf32 addr:$src2)))]>;
def FsXORPSrm : PSI<0x57, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                    "xorps\t{$src2, $dst|$dst, $src2}",
                    [(set FR32:$dst, (X86fxor FR32:$src1,
                                      (memopfsf32 addr:$src2)))]>;
let neverHasSideEffects = 1 in {
def FsANDNPSrr : PSI<0x55, MRMSrcReg,
                     (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;

let mayLoad = 1 in
def FsANDNPSrm : PSI<0x55, MRMSrcMem,
                     (outs FR32:$dst), (ins FR32:$src1, f128mem:$src2),
                     "andnps\t{$src2, $dst|$dst, $src2}", []>;
}
}

/// basic_sse1_fp_binop_rm - SSE1 binops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements undefined.
///
/// These three forms can each be reg+reg or reg+mem, so there are a total of
/// six "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass basic_sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                                  SDNode OpNode, Intrinsic F32Int,
                                  bit Commutable = 0> {
  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;
}
}

// Arithmetic instructions
defm ADD : basic_sse1_fp_binop_rm<0x58, "add", fadd, int_x86_sse_add_ss, 1>;
defm MUL : basic_sse1_fp_binop_rm<0x59, "mul", fmul, int_x86_sse_mul_ss, 1>;
defm SUB : basic_sse1_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse_sub_ss>;
defm DIV : basic_sse1_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse_div_ss>;
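
// Each 'defm' above concatenates its prefix with the names defined in the
// multiclass, so, for example, 'defm ADD' yields the six instructions
// ADDSSrr, ADDSSrm, ADDPSrr, ADDPSrm, ADDSSrr_Int and ADDSSrm_Int.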

/// sse1_fp_binop_rm - Other SSE1 binops
///
/// This multiclass is like basic_sse1_fp_binop_rm, with the addition of
/// instructions for a full-vector intrinsic form. Operations that map
/// onto C operators don't use this form since they just use the plain
/// vector form instead of having a separate vector intrinsic form.
///
/// This provides a total of eight "instructions".
///
let Constraints = "$src1 = $dst" in {
multiclass sse1_fp_binop_rm<bits<8> opc, string OpcodeStr,
                            SDNode OpNode,
                            Intrinsic F32Int,
                            Intrinsic V4F32Int,
                            bit Commutable = 0> {

  // Scalar operation, reg+reg.
  def SSrr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, FR32:$src2))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, reg+mem.
  def SSrm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
                 !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                 [(set FR32:$dst, (OpNode FR32:$src1, (load addr:$src2)))]>;

  // Vector operation, reg+reg.
  def PSrr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (v4f32 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, reg+mem.
  def PSrm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                 !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (OpNode VR128:$src1, (memopv4f32 addr:$src2)))]>;

  // Intrinsic operation, reg+reg.
  def SSrr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, reg+mem.
  def SSrm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2),
                     !strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (F32Int VR128:$src1,
                                        sse_load_f32:$src2))]>;

  // Vector intrinsic operation, reg+reg.
  def PSrr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, VR128:$src2))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, reg+mem.
  def PSrm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                     !strconcat(OpcodeStr, "ps\t{$src2, $dst|$dst, $src2}"),
                     [(set VR128:$dst, (V4F32Int VR128:$src1, (load addr:$src2)))]>;
}
}

defm MAX : sse1_fp_binop_rm<0x5F, "max", X86fmax,
                            int_x86_sse_max_ss, int_x86_sse_max_ps>;
defm MIN : sse1_fp_binop_rm<0x5D, "min", X86fmin,
                            int_x86_sse_min_ss, int_x86_sse_min_ps>;
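
// Likewise, 'defm MAX' expands to the eight instructions MAXSSrr, MAXSSrm,
// MAXPSrr, MAXPSrm, MAXSSrr_Int, MAXSSrm_Int, MAXPSrr_Int and MAXPSrm_Int
// (and similarly for MIN), matching the count noted in the comment above.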

//===----------------------------------------------------------------------===//
// SSE packed FP Instructions

// Move Instructions
let neverHasSideEffects = 1 in
def MOVAPSrr : PSI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MOVAPSrm : PSI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (alignedloadv4f32 addr:$src))]>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;

let neverHasSideEffects = 1 in
def MOVUPSrr : PSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1 in
def MOVUPSrm : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (loadv4f32 addr:$src))]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS load and store
let isSimpleLoad = 1 in
def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;

let Constraints = "$src1 = $dst" in {
  let AddedComplexity = 20 in {
    def MOVLPSrm : PSI<0x12, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movlps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1,
                                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                                 MOVLP_shuffle_mask)))]>;
    def MOVHPSrm : PSI<0x16, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                       "movhps\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst,
                         (v4f32 (vector_shuffle VR128:$src1,
                                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2)))),
                                 MOVHP_shuffle_mask)))]>;
  } // AddedComplexity
} // Constraints = "$src1 = $dst"


def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (vector_shuffle
                                         (bc_v2f64 (v4f32 VR128:$src)), (undef),
                                         UNPCKH_shuffle_mask)), (iPTR 0))),
                           addr:$dst)]>;

let Constraints = "$src1 = $dst" in {
let AddedComplexity = 15 in {
def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movlhps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHP_shuffle_mask)))]>;

def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "movhlps\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst,
                      (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
                              MOVHLPS_shuffle_mask)))]>;
} // AddedComplexity
} // Constraints = "$src1 = $dst"



// Arithmetic

/// sse1_fp_unop_rm - SSE1 unops come in both scalar and vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.
///
/// These four forms can each have a reg or a mem operand, so there are a
/// total of eight "instructions".
///
multiclass sse1_fp_unop_rm<bits<8> opc, string OpcodeStr,
                           SDNode OpNode,
                           Intrinsic F32Int,
                           Intrinsic V4F32Int,
                           bit Commutable = 0> {
  // Scalar operation, reg.
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]> {
    let isCommutable = Commutable;
  }

  // Scalar operation, mem.
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode (load addr:$src)))]>;

  // Vector operation, reg.
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]> {
    let isCommutable = Commutable;
  }

  // Vector operation, mem.
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;

  // Intrinsic operation, reg.
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Intrinsic operation, mem.
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;

  // Vector intrinsic operation, reg
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]> {
    let isCommutable = Commutable;
  }

  // Vector intrinsic operation, mem
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (load addr:$src)))]>;
}

// Square root.
defm SQRT : sse1_fp_unop_rm<0x51, "sqrt", fsqrt,
                            int_x86_sse_sqrt_ss, int_x86_sse_sqrt_ps>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_rm<0x52, "rsqrt", X86frsqrt,
                             int_x86_sse_rsqrt_ss, int_x86_sse_rsqrt_ps>;
defm RCP : sse1_fp_unop_rm<0x53, "rcp", X86frcp,
                           int_x86_sse_rcp_ss, int_x86_sse_rcp_ps>;
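
// A common refinement (not performed here) is a single Newton-Raphson step,
// e.g. for rsqrt: x1 = x0 * (1.5 - 0.5 * a * x0 * x0), and for rcp:
// x1 = x0 * (2.0 - a * x0), which roughly doubles the ~12 bits of accuracy the
// hardware estimate provides.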
821
822// Logical
Evan Cheng3ea4d672008-03-05 08:19:16 +0000823let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000824 let isCommutable = 1 in {
825 def ANDPSrr : PSI<0x54, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000826 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000827 "andps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000828 [(set VR128:$dst, (v2i64
829 (and VR128:$src1, VR128:$src2)))]>;
830 def ORPSrr : PSI<0x56, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000831 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000832 "orps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000833 [(set VR128:$dst, (v2i64
834 (or VR128:$src1, VR128:$src2)))]>;
835 def XORPSrr : PSI<0x57, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000836 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000837 "xorps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000838 [(set VR128:$dst, (v2i64
839 (xor VR128:$src1, VR128:$src2)))]>;
840 }
841
842 def ANDPSrm : PSI<0x54, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000843 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000844 "andps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000845 [(set VR128:$dst, (and (bc_v2i64 (v4f32 VR128:$src1)),
846 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000847 def ORPSrm : PSI<0x56, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000848 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000849 "orps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000850 [(set VR128:$dst, (or (bc_v2i64 (v4f32 VR128:$src1)),
851 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000852 def XORPSrm : PSI<0x57, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000853 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000854 "xorps\t{$src2, $dst|$dst, $src2}",
Evan Cheng8e92cd12007-07-19 23:34:10 +0000855 [(set VR128:$dst, (xor (bc_v2i64 (v4f32 VR128:$src1)),
856 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000857 def ANDNPSrr : PSI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000858 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000859 "andnps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000860 [(set VR128:$dst,
861 (v2i64 (and (xor VR128:$src1,
862 (bc_v2i64 (v4i32 immAllOnesV))),
863 VR128:$src2)))]>;
864 def ANDNPSrm : PSI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000865 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000866 "andnps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000867 [(set VR128:$dst,
Evan Cheng8e92cd12007-07-19 23:34:10 +0000868 (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000869 (bc_v2i64 (v4i32 immAllOnesV))),
Evan Cheng8e92cd12007-07-19 23:34:10 +0000870 (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000871}
872
Evan Cheng3ea4d672008-03-05 08:19:16 +0000873let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000874 def CMPPSrri : PSIi8<0xC2, MRMSrcReg,
Nate Begeman061db5f2008-05-12 20:34:32 +0000875 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
876 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
877 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
878 VR128:$src, imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000879 def CMPPSrmi : PSIi8<0xC2, MRMSrcMem,
Nate Begeman061db5f2008-05-12 20:34:32 +0000880 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
881 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
882 [(set VR128:$dst, (int_x86_sse_cmp_ps VR128:$src1,
883 (load addr:$src), imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000884}
Nate Begeman061db5f2008-05-12 20:34:32 +0000885def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), VR128:$src2, cond:$cc)),
886 (CMPPSrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
887def : Pat<(v4i32 (vsetcc (v4f32 VR128:$src1), (memop addr:$src2), cond:$cc)),
888 (CMPPSrmi VR128:$src1, addr:$src2, (SSE_CC_imm cond:$cc))>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000889
890// Shuffle and unpack instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +0000891let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000892 let isConvertibleToThreeAddress = 1 in // Convert to pshufd
893 def SHUFPSrri : PSIi8<0xC6, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000894 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000895 VR128:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +0000896 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000897 [(set VR128:$dst,
898 (v4f32 (vector_shuffle
899 VR128:$src1, VR128:$src2,
900 SHUFP_shuffle_mask:$src3)))]>;
901 def SHUFPSrmi : PSIi8<0xC6, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000902 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000903 f128mem:$src2, i32i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +0000904 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000905 [(set VR128:$dst,
906 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000907 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000908 SHUFP_shuffle_mask:$src3)))]>;
909
910 let AddedComplexity = 10 in {
911 def UNPCKHPSrr : PSI<0x15, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000912 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000913 "unpckhps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000914 [(set VR128:$dst,
915 (v4f32 (vector_shuffle
916 VR128:$src1, VR128:$src2,
917 UNPCKH_shuffle_mask)))]>;
918 def UNPCKHPSrm : PSI<0x15, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000919 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000920 "unpckhps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000921 [(set VR128:$dst,
922 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000923 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000924 UNPCKH_shuffle_mask)))]>;
925
926 def UNPCKLPSrr : PSI<0x14, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +0000927 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000928 "unpcklps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000929 [(set VR128:$dst,
930 (v4f32 (vector_shuffle
931 VR128:$src1, VR128:$src2,
932 UNPCKL_shuffle_mask)))]>;
933 def UNPCKLPSrm : PSI<0x14, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +0000934 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +0000935 "unpcklps\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000936 [(set VR128:$dst,
937 (v4f32 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +0000938 VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000939 UNPCKL_shuffle_mask)))]>;
940 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +0000941} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000942
943// Mask creation
Evan Chengb783fa32007-07-19 01:14:50 +0000944def MOVMSKPSrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000945 "movmskps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000946 [(set GR32:$dst, (int_x86_sse_movmsk_ps VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000947def MOVMSKPDrr : PSI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000948 "movmskpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000949 [(set GR32:$dst, (int_x86_sse2_movmsk_pd VR128:$src))]>;
950
Evan Chengd1d68072008-03-08 00:58:38 +0000951// Prefetch intrinsic.
952def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
953 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
954def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
955 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
956def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
957 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
958def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
959 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
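// The last operand in the patterns above is the locality hint from the
// llvm.prefetch intrinsic: 3 (high locality) selects prefetcht0, 2 selects
// prefetcht1, 1 selects prefetcht2, and 0 (non-temporal) selects prefetchnta.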
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000960
961// Non-temporal stores
Evan Chengb783fa32007-07-19 01:14:50 +0000962def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000963 "movntps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000964 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
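// Note: movntps is a streaming (write-combining) store that bypasses the
// cache hierarchy; like the other aligned packed stores, it requires a
// 16-byte aligned address.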
965
966// Load, store, and memory fence
Evan Chengb783fa32007-07-19 01:14:50 +0000967def SFENCE : PSI<0xAE, MRM7m, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000968
969// MXCSR register
Evan Chengb783fa32007-07-19 01:14:50 +0000970def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000971 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000972def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
Dan Gohman91888f02007-07-31 20:11:57 +0000973 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000974
975// Alias instructions that map zero vector to pxor / xorp* for sse.
Chris Lattner17dab4a2008-01-10 05:45:39 +0000976let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +0000977def V_SET0 : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +0000978 "xorps\t$dst, $dst",
Chris Lattnere6aa3862007-11-25 00:24:49 +0000979 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000980
Evan Chenga15896e2008-03-12 07:02:50 +0000981let Predicates = [HasSSE1] in {
982 def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
983 def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
984 def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
985 def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
986 def : Pat<(v4f32 immAllZerosV), (V_SET0)>;
987}
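// All 128-bit all-zeros values share one bit pattern, so a single xorps-based
// V_SET0 serves every vector type; the patterns above just retype its result
// whenever SSE1 is available.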
988
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000989// FR32 to 128-bit vector conversion.
Evan Chengb783fa32007-07-19 01:14:50 +0000990def MOVSS2PSrr : SSI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000991 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000992 [(set VR128:$dst,
993 (v4f32 (scalar_to_vector FR32:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +0000994def MOVSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +0000995 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +0000996 [(set VR128:$dst,
997 (v4f32 (scalar_to_vector (loadf32 addr:$src))))]>;
998
999// FIXME: may not be able to eliminate this movss with coalescing the src and
1000// dest register classes are different. We really want to write this pattern
1001// like this:
1002// def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
1003// (f32 FR32:$src)>;
Evan Chengb783fa32007-07-19 01:14:50 +00001004def MOVPS2SSrr : SSI<0x10, MRMSrcReg, (outs FR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001005 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001006 [(set FR32:$dst, (vector_extract (v4f32 VR128:$src),
1007 (iPTR 0)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001008def MOVPS2SSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001009 "movss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001010 [(store (f32 (vector_extract (v4f32 VR128:$src),
1011 (iPTR 0))), addr:$dst)]>;
1012
1013
1014// Move to lower bits of a VR128, leaving upper bits alone.
1015// Three operand (but two address) aliases.
Evan Cheng3ea4d672008-03-05 08:19:16 +00001016let Constraints = "$src1 = $dst" in {
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001017let neverHasSideEffects = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001018 def MOVLSS2PSrr : SSI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001019 (outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001020 "movss\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001021
1022 let AddedComplexity = 15 in
1023 def MOVLPSrr : SSI<0x10, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001024 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001025 "movss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001026 [(set VR128:$dst,
1027 (v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
1028 MOVL_shuffle_mask)))]>;
1029}
1030
// Move a scalar to the lower bits of a VR128, zeroing the upper bits.
// Loading from memory automatically zeroes the upper bits.
1033let AddedComplexity = 20 in
Evan Chengb783fa32007-07-19 01:14:50 +00001034def MOVZSS2PSrm : SSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001035 "movss\t{$src, $dst|$dst, $src}",
Evan Chenge9b9c672008-05-09 21:53:03 +00001036 [(set VR128:$dst, (v4f32 (X86vzmovl (v4f32 (scalar_to_vector
Evan Cheng40ee6e52008-05-08 00:57:18 +00001037 (loadf32 addr:$src))))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001038
Evan Cheng056afe12008-05-20 18:24:47 +00001039def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
Evan Cheng40ee6e52008-05-08 00:57:18 +00001040 (MOVZSS2PSrm addr:$src)>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001041
1042//===----------------------------------------------------------------------===//
1043// SSE2 Instructions
1044//===----------------------------------------------------------------------===//
1045
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001046// Move Instructions
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001047let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001048def MOVSDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001049 "movsd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001050let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001051def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001052 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001053 [(set FR64:$dst, (loadf64 addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001054def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001055 "movsd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001056 [(store FR64:$src, addr:$dst)]>;
1057
1058// Conversion instructions
Evan Chengb783fa32007-07-19 01:14:50 +00001059def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001060 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001061 [(set GR32:$dst, (fp_to_sint FR64:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001062def CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001063 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001064 [(set GR32:$dst, (fp_to_sint (loadf64 addr:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001065def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001066 "cvtsd2ss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001067 [(set FR32:$dst, (fround FR64:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001068def CVTSD2SSrm : SDI<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001069 "cvtsd2ss\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001070 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001071def CVTSI2SDrr : SDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001072 "cvtsi2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001073 [(set FR64:$dst, (sint_to_fp GR32:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001074def CVTSI2SDrm : SDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001075 "cvtsi2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001076 [(set FR64:$dst, (sint_to_fp (loadi32 addr:$src)))]>;
1077
1078// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001079def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001080 "cvtss2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001081 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1082 Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001083def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001084 "cvtss2sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001085 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1086 Requires<[HasSSE2]>;
1087
1088// Match intrinsics which expect XMM operand(s).
Evan Chengb783fa32007-07-19 01:14:50 +00001089def Int_CVTSD2SIrr : SDI<0x2D, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001090 "cvtsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001091 [(set GR32:$dst, (int_x86_sse2_cvtsd2si VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001092def Int_CVTSD2SIrm : SDI<0x2D, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001093 "cvtsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001094 [(set GR32:$dst, (int_x86_sse2_cvtsd2si
1095 (load addr:$src)))]>;
1096
// Match intrinsics which expect MM and XMM operand(s).
1098def Int_CVTPD2PIrr : PDI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1099 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1100 [(set VR64:$dst, (int_x86_sse_cvtpd2pi VR128:$src))]>;
1101def Int_CVTPD2PIrm : PDI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1102 "cvtpd2pi\t{$src, $dst|$dst, $src}",
1103 [(set VR64:$dst, (int_x86_sse_cvtpd2pi
1104 (load addr:$src)))]>;
1105def Int_CVTTPD2PIrr: PDI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
1106 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1107 [(set VR64:$dst, (int_x86_sse_cvttpd2pi VR128:$src))]>;
1108def Int_CVTTPD2PIrm: PDI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
1109 "cvttpd2pi\t{$src, $dst|$dst, $src}",
1110 [(set VR64:$dst, (int_x86_sse_cvttpd2pi
1111 (load addr:$src)))]>;
1112def Int_CVTPI2PDrr : PDI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
1113 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1114 [(set VR128:$dst, (int_x86_sse_cvtpi2pd VR64:$src))]>;
1115def Int_CVTPI2PDrm : PDI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1116 "cvtpi2pd\t{$src, $dst|$dst, $src}",
1117 [(set VR128:$dst, (int_x86_sse_cvtpi2pd
1118 (load addr:$src)))]>;
1119
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001120// Aliases for intrinsics
Evan Chengb783fa32007-07-19 01:14:50 +00001121def Int_CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001122 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001123 [(set GR32:$dst,
1124 (int_x86_sse2_cvttsd2si VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001125def Int_CVTTSD2SIrm : SDI<0x2C, MRMSrcMem, (outs GR32:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001126 "cvttsd2si\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001127 [(set GR32:$dst, (int_x86_sse2_cvttsd2si
1128 (load addr:$src)))]>;
1129
1130// Comparison instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +00001131let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
Evan Cheng653c7ac2007-12-20 19:57:09 +00001132 def CMPSDrr : SDIi8<0xC2, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001133 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001134 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001135let mayLoad = 1 in
Evan Cheng653c7ac2007-12-20 19:57:09 +00001136 def CMPSDrm : SDIi8<0xC2, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001137 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001138 "cmp${cc}sd\t{$src, $dst|$dst, $src}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001139}
1140
Evan Cheng950aac02007-09-25 01:57:46 +00001141let Defs = [EFLAGS] in {
Evan Chengb783fa32007-07-19 01:14:50 +00001142def UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001143 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001144 [(X86cmp FR64:$src1, FR64:$src2), (implicit EFLAGS)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001145def UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs), (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001146 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001147 [(X86cmp FR64:$src1, (loadf64 addr:$src2)),
Evan Cheng950aac02007-09-25 01:57:46 +00001148 (implicit EFLAGS)]>;
1149}
1150
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001151// Aliases to match intrinsics which expect XMM operand(s).
Evan Cheng3ea4d672008-03-05 08:19:16 +00001152let Constraints = "$src1 = $dst" in {
Evan Cheng653c7ac2007-12-20 19:57:09 +00001153 def Int_CMPSDrr : SDIi8<0xC2, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001154 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001155 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001156 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1157 VR128:$src, imm:$cc))]>;
Evan Cheng653c7ac2007-12-20 19:57:09 +00001158 def Int_CMPSDrm : SDIi8<0xC2, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001159 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src, SSECC:$cc),
Dan Gohman91888f02007-07-31 20:11:57 +00001160 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001161 [(set VR128:$dst, (int_x86_sse2_cmp_sd VR128:$src1,
1162 (load addr:$src), imm:$cc))]>;
1163}
1164
Evan Cheng950aac02007-09-25 01:57:46 +00001165let Defs = [EFLAGS] in {
Evan Chengb783fa32007-07-19 01:14:50 +00001166def Int_UCOMISDrr: PDI<0x2E, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001167 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001168 [(X86ucomi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1169 (implicit EFLAGS)]>;
1170def Int_UCOMISDrm: PDI<0x2E, MRMSrcMem, (outs),(ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001171 "ucomisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001172 [(X86ucomi (v2f64 VR128:$src1), (load addr:$src2)),
1173 (implicit EFLAGS)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001174
Evan Chengb783fa32007-07-19 01:14:50 +00001175def Int_COMISDrr: PDI<0x2F, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001176 "comisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001177 [(X86comi (v2f64 VR128:$src1), (v2f64 VR128:$src2)),
1178 (implicit EFLAGS)]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001179def Int_COMISDrm: PDI<0x2F, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001180 "comisd\t{$src2, $src1|$src1, $src2}",
Evan Cheng621216e2007-09-29 00:00:36 +00001181 [(X86comi (v2f64 VR128:$src1), (load addr:$src2)),
Evan Cheng950aac02007-09-25 01:57:46 +00001182 (implicit EFLAGS)]>;
1183} // Defs = EFLAGS]
1184
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001185// Aliases of packed SSE2 instructions for scalar use. These all have names that
1186// start with 'Fs'.
1187
1188// Alias instructions that map fld0 to pxor for sse.
Chris Lattner17dab4a2008-01-10 05:45:39 +00001189let isReMaterializable = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001190def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins),
Dan Gohman91888f02007-07-31 20:11:57 +00001191 "pxor\t$dst, $dst", [(set FR64:$dst, fpimm0)]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001192 Requires<[HasSSE2]>, TB, OpSize;
1193
1194// Alias instruction to do FR64 reg-to-reg copy using movapd. Upper bits are
1195// disregarded.
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001196let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001197def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001198 "movapd\t{$src, $dst|$dst, $src}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001199
1200// Alias instruction to load FR64 from f128mem using movapd. Upper bits are
1201// disregarded.
Chris Lattner1a1932c2008-01-06 23:38:27 +00001202let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001203def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001204 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman11821702007-07-27 17:16:43 +00001205 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001206
1207// Alias bitwise logical operations using SSE logical ops on packed FP values.
Evan Cheng3ea4d672008-03-05 08:19:16 +00001208let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001209let isCommutable = 1 in {
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001210 def FsANDPDrr : PDI<0x54, MRMSrcReg, (outs FR64:$dst),
1211 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001212 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001213 [(set FR64:$dst, (X86fand FR64:$src1, FR64:$src2))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001214 def FsORPDrr : PDI<0x56, MRMSrcReg, (outs FR64:$dst),
1215 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001216 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001217 [(set FR64:$dst, (X86for FR64:$src1, FR64:$src2))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001218 def FsXORPDrr : PDI<0x57, MRMSrcReg, (outs FR64:$dst),
1219 (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001220 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001221 [(set FR64:$dst, (X86fxor FR64:$src1, FR64:$src2))]>;
1222}
1223
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001224def FsANDPDrm : PDI<0x54, MRMSrcMem, (outs FR64:$dst),
1225 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001226 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001227 [(set FR64:$dst, (X86fand FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001228 (memopfsf64 addr:$src2)))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001229def FsORPDrm : PDI<0x56, MRMSrcMem, (outs FR64:$dst),
1230 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001231 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001232 [(set FR64:$dst, (X86for FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001233 (memopfsf64 addr:$src2)))]>;
Evan Cheng0e3e01d2008-05-02 07:53:32 +00001234def FsXORPDrm : PDI<0x57, MRMSrcMem, (outs FR64:$dst),
1235 (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001236 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001237 [(set FR64:$dst, (X86fxor FR64:$src1,
Dan Gohman11821702007-07-27 17:16:43 +00001238 (memopfsf64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001239
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001240let neverHasSideEffects = 1 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001241def FsANDNPDrr : PDI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001242 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001243 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001244let mayLoad = 1 in
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001245def FsANDNPDrm : PDI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001246 (outs FR64:$dst), (ins FR64:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001247 "andnpd\t{$src2, $dst|$dst, $src2}", []>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001248}
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001249}
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001250
1251/// basic_sse2_fp_binop_rm - SSE2 binops come in both scalar and vector forms.
1252///
1253/// In addition, we also have a special variant of the scalar form here to
1254/// represent the associated intrinsic operation. This form is unlike the
1255/// plain scalar form, in that it takes an entire vector (instead of a scalar)
1256/// and leaves the top elements undefined.
1257///
1258/// These three forms can each be reg+reg or reg+mem, so there are a total of
1259/// six "instructions".
1260///
Evan Cheng3ea4d672008-03-05 08:19:16 +00001261let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001262multiclass basic_sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1263 SDNode OpNode, Intrinsic F64Int,
1264 bit Commutable = 0> {
1265 // Scalar operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001266 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001267 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001268 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1269 let isCommutable = Commutable;
1270 }
1271
1272 // Scalar operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001273 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001274 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001275 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1276
1277 // Vector operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001278 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001279 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001280 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1281 let isCommutable = Commutable;
1282 }
1283
1284 // Vector operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001285 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001286 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001287 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001288
1289 // Intrinsic operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001290 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001291 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001292 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1293 let isCommutable = Commutable;
1294 }
1295
1296 // Intrinsic operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001297 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001298 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001299 [(set VR128:$dst, (F64Int VR128:$src1,
1300 sse_load_f64:$src2))]>;
1301}
1302}
1303
1304// Arithmetic instructions
1305defm ADD : basic_sse2_fp_binop_rm<0x58, "add", fadd, int_x86_sse2_add_sd, 1>;
1306defm MUL : basic_sse2_fp_binop_rm<0x59, "mul", fmul, int_x86_sse2_mul_sd, 1>;
1307defm SUB : basic_sse2_fp_binop_rm<0x5C, "sub", fsub, int_x86_sse2_sub_sd>;
1308defm DIV : basic_sse2_fp_binop_rm<0x5E, "div", fdiv, int_x86_sse2_div_sd>;
1309
1310/// sse2_fp_binop_rm - Other SSE2 binops
1311///
1312/// This multiclass is like basic_sse2_fp_binop_rm, with the addition of
1313/// instructions for a full-vector intrinsic form. Operations that map
1314/// onto C operators don't use this form since they just use the plain
1315/// vector form instead of having a separate vector intrinsic form.
1316///
1317/// This provides a total of eight "instructions".
1318///
Evan Cheng3ea4d672008-03-05 08:19:16 +00001319let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001320multiclass sse2_fp_binop_rm<bits<8> opc, string OpcodeStr,
1321 SDNode OpNode,
1322 Intrinsic F64Int,
1323 Intrinsic V2F64Int,
1324 bit Commutable = 0> {
1325
1326 // Scalar operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001327 def SDrr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001328 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001329 [(set FR64:$dst, (OpNode FR64:$src1, FR64:$src2))]> {
1330 let isCommutable = Commutable;
1331 }
1332
1333 // Scalar operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001334 def SDrm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001335 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001336 [(set FR64:$dst, (OpNode FR64:$src1, (load addr:$src2)))]>;
1337
1338 // Vector operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001339 def PDrr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001340 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001341 [(set VR128:$dst, (v2f64 (OpNode VR128:$src1, VR128:$src2)))]> {
1342 let isCommutable = Commutable;
1343 }
1344
1345 // Vector operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001346 def PDrm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001347 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001348 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2f64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001349
1350 // Intrinsic operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001351 def SDrr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001352 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001353 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2))]> {
1354 let isCommutable = Commutable;
1355 }
1356
1357 // Intrinsic operation, reg+mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001358 def SDrm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001359 !strconcat(OpcodeStr, "sd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001360 [(set VR128:$dst, (F64Int VR128:$src1,
1361 sse_load_f64:$src2))]>;
1362
1363 // Vector intrinsic operation, reg+reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001364 def PDrr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001365 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001366 [(set VR128:$dst, (V2F64Int VR128:$src1, VR128:$src2))]> {
1367 let isCommutable = Commutable;
1368 }
1369
1370 // Vector intrinsic operation, reg+mem.
Dan Gohmanc747be52007-08-02 21:06:40 +00001371 def PDrm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001372 !strconcat(OpcodeStr, "pd\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001373 [(set VR128:$dst, (V2F64Int VR128:$src1, (load addr:$src2)))]>;
1374}
1375}
1376
1377defm MAX : sse2_fp_binop_rm<0x5F, "max", X86fmax,
1378 int_x86_sse2_max_sd, int_x86_sse2_max_pd>;
1379defm MIN : sse2_fp_binop_rm<0x5D, "min", X86fmin,
1380 int_x86_sse2_min_sd, int_x86_sse2_min_pd>;
1381
1382//===----------------------------------------------------------------------===//
1383// SSE packed FP Instructions
1384
1385// Move Instructions
Chris Lattnerc90ee9c2008-01-10 07:59:24 +00001386let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001387def MOVAPDrr : PDI<0x28, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001388 "movapd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001389let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001390def MOVAPDrm : PDI<0x28, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001391 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001392 [(set VR128:$dst, (alignedloadv2f64 addr:$src))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001393
Evan Chengb783fa32007-07-19 01:14:50 +00001394def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001395 "movapd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001396 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001397
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001398let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001399def MOVUPDrr : PDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001400 "movupd\t{$src, $dst|$dst, $src}", []>;
Chris Lattner1a1932c2008-01-06 23:38:27 +00001401let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001402def MOVUPDrm : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001403 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001404 [(set VR128:$dst, (loadv2f64 addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001405def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001406 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001407 [(store (v2f64 VR128:$src), addr:$dst)]>;
1408
1409// Intrinsic forms of MOVUPD load and store
Evan Chengb783fa32007-07-19 01:14:50 +00001410def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001411 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001412 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001413def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001414 "movupd\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001415 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001416
Evan Cheng3ea4d672008-03-05 08:19:16 +00001417let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001418 let AddedComplexity = 20 in {
1419 def MOVLPDrm : PDI<0x12, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001420 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001421 "movlpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001422 [(set VR128:$dst,
1423 (v2f64 (vector_shuffle VR128:$src1,
1424 (scalar_to_vector (loadf64 addr:$src2)),
1425 MOVLP_shuffle_mask)))]>;
1426 def MOVHPDrm : PDI<0x16, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001427 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001428 "movhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001429 [(set VR128:$dst,
1430 (v2f64 (vector_shuffle VR128:$src1,
1431 (scalar_to_vector (loadf64 addr:$src2)),
1432 MOVHP_shuffle_mask)))]>;
1433 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +00001434} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001435
Evan Chengb783fa32007-07-19 01:14:50 +00001436def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001437 "movlpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001438 [(store (f64 (vector_extract (v2f64 VR128:$src),
1439 (iPTR 0))), addr:$dst)]>;
1440
1441// v2f64 extract element 1 is always custom lowered to unpack high to low
1442// and extract element 0 so the non-store version isn't too horrible.
Evan Chengb783fa32007-07-19 01:14:50 +00001443def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001444 "movhpd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001445 [(store (f64 (vector_extract
1446 (v2f64 (vector_shuffle VR128:$src, (undef),
1447 UNPCKH_shuffle_mask)), (iPTR 0))),
1448 addr:$dst)]>;
1449
1450// SSE2 instructions without OpSize prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001451def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001452 "cvtdq2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001453 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1454 TB, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001455def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001456 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1457 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1458 (bitconvert (memopv2i64 addr:$src))))]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001459 TB, Requires<[HasSSE2]>;
1460
1461// SSE2 instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001462def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001463 "cvtdq2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001464 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1465 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001466def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001467 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1468 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1469 (bitconvert (memopv2i64 addr:$src))))]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001470 XS, Requires<[HasSSE2]>;
1471
Evan Chengb783fa32007-07-19 01:14:50 +00001472def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Evan Cheng14c97c32008-03-14 07:46:48 +00001473 "cvtps2dq\t{$src, $dst|$dst, $src}",
1474 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001475def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001476 "cvtps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001477 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1478 (load addr:$src)))]>;
1479// SSE2 packed instructions with XS prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001480def Int_CVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001481 "cvttps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001482 [(set VR128:$dst, (int_x86_sse2_cvttps2dq VR128:$src))]>,
1483 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001484def Int_CVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001485 "cvttps2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001486 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1487 (load addr:$src)))]>,
1488 XS, Requires<[HasSSE2]>;
1489
1490// SSE2 packed instructions with XD prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001491def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001492 "cvtpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001493 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1494 XD, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001495def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001496 "cvtpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001497 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1498 (load addr:$src)))]>,
1499 XD, Requires<[HasSSE2]>;
1500
Evan Chengb783fa32007-07-19 01:14:50 +00001501def Int_CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001502 "cvttpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001503 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
Evan Cheng14c97c32008-03-14 07:46:48 +00001504def Int_CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001505 "cvttpd2dq\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001506 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1507 (load addr:$src)))]>;
1508
1509// SSE2 instructions without OpSize prefix
Evan Chengb783fa32007-07-19 01:14:50 +00001510def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001511 "cvtps2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001512 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1513 TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001515 "cvtps2pd\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001516 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1517 (load addr:$src)))]>,
1518 TB, Requires<[HasSSE2]>;
1519
Evan Chengb783fa32007-07-19 01:14:50 +00001520def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001521 "cvtpd2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001522 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001524 "cvtpd2ps\t{$src, $dst|$dst, $src}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001525 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1526 (load addr:$src)))]>;
1527
1528// Match intrinsics which expect XMM operand(s).
1529// Aliases for intrinsics
Evan Cheng3ea4d672008-03-05 08:19:16 +00001530let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001531def Int_CVTSI2SDrr: SDI<0x2A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001532 (outs VR128:$dst), (ins VR128:$src1, GR32:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001533 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001534 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1535 GR32:$src2))]>;
1536def Int_CVTSI2SDrm: SDI<0x2A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001537 (outs VR128:$dst), (ins VR128:$src1, i32mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001538 "cvtsi2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001539 [(set VR128:$dst, (int_x86_sse2_cvtsi2sd VR128:$src1,
1540 (loadi32 addr:$src2)))]>;
1541def Int_CVTSD2SSrr: SDI<0x5A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001542 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001543 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001544 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1545 VR128:$src2))]>;
1546def Int_CVTSD2SSrm: SDI<0x5A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001547 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001548 "cvtsd2ss\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001549 [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1,
1550 (load addr:$src2)))]>;
1551def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001552 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001553 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001554 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1555 VR128:$src2))]>, XS,
1556 Requires<[HasSSE2]>;
1557def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001558 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001559 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001560 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1561 (load addr:$src2)))]>, XS,
1562 Requires<[HasSSE2]>;
1563}
1564
1565// Arithmetic
1566
1567/// sse2_fp_unop_rm - SSE2 unops come in both scalar and vector forms.
1568///
1569/// In addition, we also have a special variant of the scalar form here to
1570/// represent the associated intrinsic operation. This form is unlike the
1571/// plain scalar form, in that it takes an entire vector (instead of a
1572/// scalar) and leaves the top elements undefined.
1573///
1574/// And, we have a special variant form for a full-vector intrinsic form.
1575///
1576/// These four forms can each have a reg or a mem operand, so there are a
1577/// total of eight "instructions".
1578///
1579multiclass sse2_fp_unop_rm<bits<8> opc, string OpcodeStr,
1580 SDNode OpNode,
1581 Intrinsic F64Int,
1582 Intrinsic V2F64Int,
1583 bit Commutable = 0> {
1584 // Scalar operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001585 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001586 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001587 [(set FR64:$dst, (OpNode FR64:$src))]> {
1588 let isCommutable = Commutable;
1589 }
1590
1591 // Scalar operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001592 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001593 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001594 [(set FR64:$dst, (OpNode (load addr:$src)))]>;
1595
1596 // Vector operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001597 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001598 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001599 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]> {
1600 let isCommutable = Commutable;
1601 }
1602
1603 // Vector operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001604 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001605 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohman4a4f1512007-07-18 20:23:34 +00001606 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001607
1608 // Intrinsic operation, reg.
Evan Chengb783fa32007-07-19 01:14:50 +00001609 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001610 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001611 [(set VR128:$dst, (F64Int VR128:$src))]> {
1612 let isCommutable = Commutable;
1613 }
1614
1615 // Intrinsic operation, mem.
Evan Chengb783fa32007-07-19 01:14:50 +00001616 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001617 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001618 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1619
1620 // Vector intrinsic operation, reg
Evan Chengb783fa32007-07-19 01:14:50 +00001621 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001622 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001623 [(set VR128:$dst, (V2F64Int VR128:$src))]> {
1624 let isCommutable = Commutable;
1625 }
1626
1627 // Vector intrinsic operation, mem
Dan Gohmanc747be52007-08-02 21:06:40 +00001628 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001629 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001630 [(set VR128:$dst, (V2F64Int (load addr:$src)))]>;
1631}
1632
1633// Square root.
1634defm SQRT : sse2_fp_unop_rm<0x51, "sqrt", fsqrt,
1635 int_x86_sse2_sqrt_sd, int_x86_sse2_sqrt_pd>;
1636
1637// There is no f64 version of the reciprocal approximation instructions.
1638
1639// Logical
Evan Cheng3ea4d672008-03-05 08:19:16 +00001640let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001641 let isCommutable = 1 in {
1642 def ANDPDrr : PDI<0x54, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001643 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001644 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001645 [(set VR128:$dst,
1646 (and (bc_v2i64 (v2f64 VR128:$src1)),
1647 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1648 def ORPDrr : PDI<0x56, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001649 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001650 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001651 [(set VR128:$dst,
1652 (or (bc_v2i64 (v2f64 VR128:$src1)),
1653 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1654 def XORPDrr : PDI<0x57, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001655 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001656 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001657 [(set VR128:$dst,
1658 (xor (bc_v2i64 (v2f64 VR128:$src1)),
1659 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1660 }
1661
1662 def ANDPDrm : PDI<0x54, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001663 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001664 "andpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001665 [(set VR128:$dst,
1666 (and (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001667 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001668 def ORPDrm : PDI<0x56, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001669 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001670 "orpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001671 [(set VR128:$dst,
1672 (or (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001673 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001674 def XORPDrm : PDI<0x57, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001675 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001676 "xorpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001677 [(set VR128:$dst,
1678 (xor (bc_v2i64 (v2f64 VR128:$src1)),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001679 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001680 def ANDNPDrr : PDI<0x55, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001681 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001682 "andnpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001683 [(set VR128:$dst,
1684 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1685 (bc_v2i64 (v2f64 VR128:$src2))))]>;
1686 def ANDNPDrm : PDI<0x55, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001687 (outs VR128:$dst), (ins VR128:$src1,f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001688 "andnpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001689 [(set VR128:$dst,
1690 (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
Evan Cheng8e92cd12007-07-19 23:34:10 +00001691 (memopv2i64 addr:$src2)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001692}
1693
Evan Cheng3ea4d672008-03-05 08:19:16 +00001694let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001695 def CMPPDrri : PDIi8<0xC2, MRMSrcReg,
Evan Cheng14c97c32008-03-14 07:46:48 +00001696 (outs VR128:$dst), (ins VR128:$src1, VR128:$src, SSECC:$cc),
1697 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1698 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
Nate Begeman061db5f2008-05-12 20:34:32 +00001699 VR128:$src, imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001700 def CMPPDrmi : PDIi8<0xC2, MRMSrcMem,
Evan Cheng14c97c32008-03-14 07:46:48 +00001701 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src, SSECC:$cc),
1702 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1703 [(set VR128:$dst, (int_x86_sse2_cmp_pd VR128:$src1,
Nate Begeman061db5f2008-05-12 20:34:32 +00001704 (load addr:$src), imm:$cc))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001705}
Nate Begeman061db5f2008-05-12 20:34:32 +00001706def : Pat<(v2i64 (vsetcc (v2f64 VR128:$src1), VR128:$src2, cond:$cc)),
1707 (CMPPDrri VR128:$src1, VR128:$src2, (SSE_CC_imm cond:$cc))>;
1708def : Pat<(v2i64 (vsetcc (v2f64 VR128:$src1), (memop addr:$src2), cond:$cc)),
1709 (CMPPDrmi VR128:$src1, addr:$src2, (SSE_CC_imm cond:$cc))>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001710
1711// Shuffle and unpack instructions
Evan Cheng3ea4d672008-03-05 08:19:16 +00001712let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001713 def SHUFPDrri : PDIi8<0xC6, MRMSrcReg,
Evan Cheng14c97c32008-03-14 07:46:48 +00001714 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3),
1715 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1716 [(set VR128:$dst, (v2f64 (vector_shuffle
1717 VR128:$src1, VR128:$src2,
1718 SHUFP_shuffle_mask:$src3)))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001719 def SHUFPDrmi : PDIi8<0xC6, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001720 (outs VR128:$dst), (ins VR128:$src1,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001721 f128mem:$src2, i8imm:$src3),
Dan Gohman91888f02007-07-31 20:11:57 +00001722 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001723 [(set VR128:$dst,
1724 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001725 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001726 SHUFP_shuffle_mask:$src3)))]>;
1727
1728 let AddedComplexity = 10 in {
1729 def UNPCKHPDrr : PDI<0x15, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001730 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001731 "unpckhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001732 [(set VR128:$dst,
1733 (v2f64 (vector_shuffle
1734 VR128:$src1, VR128:$src2,
1735 UNPCKH_shuffle_mask)))]>;
1736 def UNPCKHPDrm : PDI<0x15, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001737 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001738 "unpckhpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001739 [(set VR128:$dst,
1740 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001741 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001742 UNPCKH_shuffle_mask)))]>;
1743
1744 def UNPCKLPDrr : PDI<0x14, MRMSrcReg,
Evan Chengb783fa32007-07-19 01:14:50 +00001745 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001746 "unpcklpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001747 [(set VR128:$dst,
1748 (v2f64 (vector_shuffle
1749 VR128:$src1, VR128:$src2,
1750 UNPCKL_shuffle_mask)))]>;
1751 def UNPCKLPDrm : PDI<0x14, MRMSrcMem,
Evan Chengb783fa32007-07-19 01:14:50 +00001752 (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001753 "unpcklpd\t{$src2, $dst|$dst, $src2}",
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001754 [(set VR128:$dst,
1755 (v2f64 (vector_shuffle
Dan Gohman7dc19012007-08-02 21:17:01 +00001756 VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001757 UNPCKL_shuffle_mask)))]>;
1758 } // AddedComplexity
Evan Cheng3ea4d672008-03-05 08:19:16 +00001759} // Constraints = "$src1 = $dst"
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001760
1761
1762//===----------------------------------------------------------------------===//
1763// SSE integer instructions
1764
1765// Move Instructions
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001766let neverHasSideEffects = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001767def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001768 "movdqa\t{$src, $dst|$dst, $src}", []>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001769let isSimpleLoad = 1, mayLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001770def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001771 "movdqa\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001772 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001773let mayStore = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001774def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001775 "movdqa\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001776 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001777let isSimpleLoad = 1, mayLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001778def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001779 "movdqu\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001780 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001781 XS, Requires<[HasSSE2]>;
Chris Lattnerd1a9eb62008-01-11 06:59:07 +00001782let mayStore = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001783def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001784 "movdqu\t{$src, $dst|$dst, $src}",
Evan Cheng51a49b22007-07-20 00:27:43 +00001785 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001786 XS, Requires<[HasSSE2]>;
1787
Dan Gohman4a4f1512007-07-18 20:23:34 +00001788// Intrinsic forms of MOVDQU load and store
Chris Lattner1a1932c2008-01-06 23:38:27 +00001789let isSimpleLoad = 1 in
Evan Chengb783fa32007-07-19 01:14:50 +00001790def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001791 "movdqu\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001792 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
1793 XS, Requires<[HasSSE2]>;
Evan Chengb783fa32007-07-19 01:14:50 +00001794def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
Dan Gohman91888f02007-07-31 20:11:57 +00001795 "movdqu\t{$src, $dst|$dst, $src}",
Dan Gohman4a4f1512007-07-18 20:23:34 +00001796 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
1797 XS, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001798
Evan Cheng88004752008-03-05 08:11:27 +00001799let Constraints = "$src1 = $dst" in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001800
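/// PDI_binop_rm_int - Simple SSE2 binary operator on an intrinsic, providing
/// reg+reg and reg+mem forms.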
1801multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
1802 bit Commutable = 0> {
Evan Chengb783fa32007-07-19 01:14:50 +00001803 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001804 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001805 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]> {
1806 let isCommutable = Commutable;
1807 }
Evan Chengb783fa32007-07-19 01:14:50 +00001808 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
Dan Gohman91888f02007-07-31 20:11:57 +00001809 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001810 [(set VR128:$dst, (IntId VR128:$src1,
Dan Gohman4a4f1512007-07-18 20:23:34 +00001811 (bitconvert (memopv2i64 addr:$src2))))]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001812}
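// Illustrative note (a sketch of the expansion, not an additional definition):
// an instantiation such as
//   defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb", int_x86_sse2_padds_b, 1>;
// below produces a register-register form PADDSBrr and a load-folding form
// PADDSBrm; only the rr form is marked commutable, since the memory operand
// cannot be swapped.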

multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr,
                             Intrinsic IntId, Intrinsic IntId2> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (IntId VR128:$src1,
                                  (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
}
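// Illustrative note: each shift instantiation below, e.g. PSLLW, therefore
// gets three forms: PSLLWrr (count in an XMM register), PSLLWrm (count loaded
// from memory), and PSLLWri (immediate count, encoded via opc2/ImmForm and
// wired to the corresponding *i intrinsic such as int_x86_sse2_pslli_w).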

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                        (bitconvert (memopv2i64 addr:$src2)))))]>;
}

/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
///
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit Commutable = 0> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]> {
    let isCommutable = Commutable;
  }
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
               !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
               [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}

} // Constraints = "$src1 = $dst"

// 128-bit Integer Arithmetic

defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;

defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;

defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;

defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;

defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w , 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;

defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;

defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;

defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;

defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
                               int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
                               int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
                               int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;

defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
                               int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
                               int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
                               int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;

defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
                               int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
                               int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;

// 128-bit logical shifts.
let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
  def PSLLDQri : PDIi8<0x73, MRM7r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "pslldq\t{$src2, $dst|$dst, $src2}", []>;
  def PSRLDQri : PDIi8<0x73, MRM3r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "psrldq\t{$src2, $dst|$dst, $src2}", []>;
  // PSRADQri doesn't exist in SSE[1-3].
}

let Predicates = [HasSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (PSRLDQri VR128:$src1, (PSxLDQ_imm imm:$src2)))>;
}
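// Note: pslldq/psrldq shift by a byte count, while the psll_dq/psrl_dq
// intrinsic operands are run through the PSxLDQ_imm transform defined with the
// other SSE fragments in this file, presumably to rescale the amount into the
// units the instruction expects.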

// Logical
defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
defm POR  : PDI_binop_rm_v2i64<0xEB, "por" , or , 1>;
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;

let Constraints = "$src1 = $dst" in {
  def PANDNrr : PDI<0xDF, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                              VR128:$src2)))]>;

  def PANDNrm : PDI<0xDF, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                              (memopv2i64 addr:$src2))))]>;
}

// SSE2 Integer comparison
defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b>;
defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w>;
defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d>;
defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;

def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), VR128:$src2, SETEQ)),
          (PCMPEQBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), (memop addr:$src2), SETEQ)),
          (PCMPEQBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), VR128:$src2, SETEQ)),
          (PCMPEQWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), (memop addr:$src2), SETEQ)),
          (PCMPEQWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETEQ)),
          (PCMPEQDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), (memop addr:$src2), SETEQ)),
          (PCMPEQDrm VR128:$src1, addr:$src2)>;

def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), VR128:$src2, SETGT)),
          (PCMPGTBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (vsetcc (v16i8 VR128:$src1), (memop addr:$src2), SETGT)),
          (PCMPGTBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), VR128:$src2, SETGT)),
          (PCMPGTWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (vsetcc (v8i16 VR128:$src1), (memop addr:$src2), SETGT)),
          (PCMPGTWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), VR128:$src2, SETGT)),
          (PCMPGTDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (vsetcc (v4i32 VR128:$src1), (memop addr:$src2), SETGT)),
          (PCMPGTDrm VR128:$src1, addr:$src2)>;
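// The patterns above let generic vsetcc nodes with SETEQ/SETGT conditions
// reuse the intrinsic-form PCMPEQ*/PCMPGT* definitions, including their
// load-folding rm variants.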

// Pack instructions
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;

// Shuffle and unpack instructions
def PSHUFDri : PDIi8<0x70, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
                     "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst, (v4i32 (vector_shuffle
                                               VR128:$src1, (undef),
                                               PSHUFD_shuffle_mask:$src2)))]>;
def PSHUFDmi : PDIi8<0x70, MRMSrcMem,
                     (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
                     "pshufd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set VR128:$dst, (v4i32 (vector_shuffle
                                               (bc_v4i32 (memopv2i64 addr:$src1)),
                                               (undef),
                                               PSHUFD_shuffle_mask:$src2)))]>;

// SSE2 with ImmT == Imm8 and XS prefix.
def PSHUFHWri : Ii8<0x70, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
                    "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              VR128:$src1, (undef),
                                              PSHUFHW_shuffle_mask:$src2)))]>,
                XS, Requires<[HasSSE2]>;
def PSHUFHWmi : Ii8<0x70, MRMSrcMem,
                    (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
                    "pshufhw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              (bc_v8i16 (memopv2i64 addr:$src1)),
                                              (undef),
                                              PSHUFHW_shuffle_mask:$src2)))]>,
                XS, Requires<[HasSSE2]>;

// SSE2 with ImmT == Imm8 and XD prefix.
def PSHUFLWri : Ii8<0x70, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              VR128:$src1, (undef),
                                              PSHUFLW_shuffle_mask:$src2)))]>,
                XD, Requires<[HasSSE2]>;
def PSHUFLWmi : Ii8<0x70, MRMSrcMem,
                    (outs VR128:$dst), (ins i128mem:$src1, i32i8imm:$src2),
                    "pshuflw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v8i16 (vector_shuffle
                                              (bc_v8i16 (memopv2i64 addr:$src1)),
                                              (undef),
                                              PSHUFLW_shuffle_mask:$src2)))]>,
                XD, Requires<[HasSSE2]>;

let Constraints = "$src1 = $dst" in {
  def PUNPCKLBWrr : PDI<0x60, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpcklbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLBWrm : PDI<0x60, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpcklbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1,
                                  (bc_v16i8 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLWDrr : PDI<0x61, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpcklwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLWDrm : PDI<0x61, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpcklwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1,
                                  (bc_v8i16 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLDQrr : PDI<0x62, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckldq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLDQrm : PDI<0x62, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckldq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1,
                                  (bc_v4i32 (memopv2i64 addr:$src2)),
                                  UNPCKL_shuffle_mask)))]>;
  def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                   UNPCKL_shuffle_mask)))]>;
  def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1,
                                   (memopv2i64 addr:$src2),
                                   UNPCKL_shuffle_mask)))]>;

  def PUNPCKHBWrr : PDI<0x68, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHBWrm : PDI<0x68, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhbw\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v16i8 (vector_shuffle VR128:$src1,
                                  (bc_v16i8 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHWDrr : PDI<0x69, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHWDrm : PDI<0x69, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhwd\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v8i16 (vector_shuffle VR128:$src1,
                                  (bc_v8i16 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHDQrr : PDI<0x6A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHDQrm : PDI<0x6A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v4i32 (vector_shuffle VR128:$src1,
                                  (bc_v4i32 (memopv2i64 addr:$src2)),
                                  UNPCKH_shuffle_mask)))]>;
  def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
                                   UNPCKH_shuffle_mask)))]>;
  def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (vector_shuffle VR128:$src1,
                                   (memopv2i64 addr:$src2),
                                   UNPCKH_shuffle_mask)))]>;
}

// Extract / Insert
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                     (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                 imm:$src2))]>;
let Constraints = "$src1 = $dst" in {
  def PINSRWrri : PDIi8<0xC4, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1,
                         GR32:$src2, i32i8imm:$src3),
                        "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
  def PINSRWrmi : PDIi8<0xC4, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1,
                         i16mem:$src2, i32i8imm:$src3),
                        "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                        [(set VR128:$dst,
                          (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                                     imm:$src3))]>;
}

// Mask creation
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
                     "pmovmskb\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;

// Conditional store
let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
                     "maskmovdqu\t{$mask, $src|$src, $mask}",
                     [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;

// Non-temporal stores
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntpd\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntdq\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                 "movnti\t{$src, $dst|$dst, $src}",
                 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
               TB, Requires<[HasSSE2]>;

// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
              TB, Requires<[HasSSE2]>;

// Load, store, and memory fence
def LFENCE : I<0xAE, MRM5m, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM6m, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;

// TODO: custom lower this so as to never even generate the noop
def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
                      (i8 0)), (NOOP)>;
def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>;
def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>;
def : Pat<(membarrier (i8 imm:$ll), (i8 imm:$ls), (i8 imm:$sl), (i8 imm:$ss),
                      (i8 1)), (MFENCE)>;

// Alias instruction that maps an all-ones vector to pcmpeqd for SSE.
let isReMaterializable = 1 in
  def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins),
                         "pcmpeqd\t$dst, $dst",
                         [(set VR128:$dst, (v4i32 immAllOnesV))]>;

// FR64 to 128-bit vector conversion.
def MOVSD2PDrr : SDI<0x10, MRMSrcReg, (outs VR128:$dst), (ins FR64:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (scalar_to_vector FR64:$src)))]>;
def MOVSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (scalar_to_vector (loadf64 addr:$src))))]>;

def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;

def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert GR32:$src))]>;

def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;

// SSE2 instructions with XS prefix
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                  Requires<[HasSSE2]>;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

// FIXME: may not be able to eliminate this movsd with coalescing, because the
// src and dest register classes are different. We really want to write this
// pattern like this:
// def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
//           (f64 FR64:$src)>;
def MOVPD2SDrr : SDI<0x10, MRMSrcReg, (outs FR64:$dst), (ins VR128:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(set FR64:$dst, (vector_extract (v2f64 VR128:$src),
                                       (iPTR 0)))]>;
def MOVPD2SDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movsd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>;
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))]>;
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (bitconvert FR32:$src))]>;
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;

// Move to lower bits of a VR128, leaving upper bits alone.
// Three operand (but two address) aliases.
let Constraints = "$src1 = $dst" in {
  let neverHasSideEffects = 1 in
  def MOVLSD2PDrr : SDI<0x10, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
                        "movsd\t{$src2, $dst|$dst, $src2}", []>;

  let AddedComplexity = 15 in
  def MOVLPDrr : SDI<0x10, MRMSrcReg,
                     (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                     "movsd\t{$src2, $dst|$dst, $src2}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle VR128:$src1, VR128:$src2,
                               MOVL_shuffle_mask)))]>;
}

// Store / copy the lower 64 bits of an XMM register.
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;

// Move to the lower bits of a VR128, zeroing the upper bits.
// Loading from memory automatically zeroes the upper bits.
let AddedComplexity = 20 in {
def MOVZSD2PDrm : SDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                      "movsd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2f64 (X86vzmovl (v2f64 (scalar_to_vector
                                                  (loadf64 addr:$src))))))]>;

def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
          (MOVZSD2PDrm addr:$src)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
          (MOVZSD2PDrm addr:$src)>;
def : Pat<(v2f64 (X86vzload addr:$src)), (MOVZSD2PDrm addr:$src)>;
}

// movd / movq to XMM register zero-extends
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                          (v4i32 (scalar_to_vector GR32:$src)))))]>;
// This is X86-64 only.
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                           (v2i64 (scalar_to_vector GR64:$src)))))]>;
}

let AddedComplexity = 20 in {
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>;
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>, XS,
                   Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}

// Moving from XMM to XMM and clearing the upper 64 bits. Note: there is a bug
// in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                      XS, Requires<[HasSSE2]>;

let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                                  (loadv2i64 addr:$src))))]>,
                      XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
          (MOVZPQILo2PQIrm addr:$src)>;
}
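// Illustrative note: the AddedComplexity values above bias instruction
// selection toward these zero-extending patterns, so they are tried before
// the plain move patterns that have default complexity.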

//===----------------------------------------------------------------------===//
// SSE3 Instructions
//===----------------------------------------------------------------------===//

// Move Instructions
def MOVSHDUPrr : S3SI<0x16, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movshdup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src, (undef),
                                                MOVSHDUP_shuffle_mask)))]>;
def MOVSHDUPrm : S3SI<0x16, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "movshdup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                (memopv4f32 addr:$src), (undef),
                                                MOVSHDUP_shuffle_mask)))]>;

def MOVSLDUPrr : S3SI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movsldup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                VR128:$src, (undef),
                                                MOVSLDUP_shuffle_mask)))]>;
def MOVSLDUPrm : S3SI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "movsldup\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (v4f32 (vector_shuffle
                                                (memopv4f32 addr:$src), (undef),
                                                MOVSLDUP_shuffle_mask)))]>;

def MOVDDUPrr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "movddup\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (v2f64 (vector_shuffle
                                               VR128:$src, (undef),
                                               SSE_splat_lo_mask)))]>;
def MOVDDUPrm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "movddup\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2f64 (vector_shuffle
                               (scalar_to_vector (loadf64 addr:$src)),
                               (undef),
                               SSE_splat_lo_mask)))]>;

// Arithmetic
let Constraints = "$src1 = $dst" in {
  def ADDSUBPSrr : S3DI<0xD0, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "addsubps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
                                                                  VR128:$src2))]>;
  def ADDSUBPSrm : S3DI<0xD0, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                        "addsubps\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst, (int_x86_sse3_addsub_ps VR128:$src1,
                                                                  (load addr:$src2)))]>;
  def ADDSUBPDrr : S3I<0xD0, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "addsubpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
                                                                 VR128:$src2))]>;
  def ADDSUBPDrm : S3I<0xD0, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
                       "addsubpd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst, (int_x86_sse3_addsub_pd VR128:$src1,
                                                                 (load addr:$src2)))]>;
}

def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;

// Horizontal ops
class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3DI<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>;
class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (load addr:$src2))))]>;
class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
        [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>;
class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId>
  : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
        [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (load addr:$src2))))]>;

let Constraints = "$src1 = $dst" in {
  def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>;
  def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>;
  def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
  def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>;
  def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
  def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>;
  def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
  def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>;
}

// Thread synchronization
def MONITOR : I<0xC8, RawFrm, (outs), (ins), "monitor",
                [(int_x86_sse3_monitor EAX, ECX, EDX)]>, TB, Requires<[HasSSE3]>;
def MWAIT   : I<0xC9, RawFrm, (outs), (ins), "mwait",
                [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;

// vector_shuffle v1, <undef> <1, 1, 3, 3>
let AddedComplexity = 15 in
def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                                 MOVSHDUP_shuffle_mask)),
          (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
let AddedComplexity = 20 in
def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
                                 MOVSHDUP_shuffle_mask)),
          (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;

// vector_shuffle v1, <undef> <0, 0, 2, 2>
let AddedComplexity = 15 in
  def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
                                   MOVSLDUP_shuffle_mask)),
            (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
let AddedComplexity = 20 in
  def : Pat<(v4i32 (vector_shuffle (bc_v4i32 (memopv2i64 addr:$src)), (undef),
                                   MOVSLDUP_shuffle_mask)),
            (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;

//===----------------------------------------------------------------------===//
// SSSE3 Instructions
//===----------------------------------------------------------------------===//

/// SS3I_unop_rm_int_8 - Simple SSSE3 unary operator whose type is v*i8.
multiclass SS3I_unop_rm_int_8<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId64, Intrinsic IntId128> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]>;

  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64 (bitconvert (memopv8i8 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv16i8 addr:$src))))]>, OpSize;
}

/// SS3I_unop_rm_int_16 - Simple SSSE3 unary operator whose type is v*i16.
multiclass SS3I_unop_rm_int_16<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]>;

  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64
                      (bitconvert (memopv4i16 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

/// SS3I_unop_rm_int_32 - Simple SSSE3 unary operator whose type is v*i32.
multiclass SS3I_unop_rm_int_32<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId64, Intrinsic IntId128> {
  def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                   (ins VR64:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst, (IntId64 VR64:$src))]>;

  def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                   (ins i64mem:$src),
                   !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                   [(set VR64:$dst,
                     (IntId64
                      (bitconvert (memopv2i32 addr:$src))))]>;

  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv4i32 addr:$src))))]>, OpSize;
}

defm PABSB : SS3I_unop_rm_int_8 <0x1C, "pabsb",
                                 int_x86_ssse3_pabs_b,
                                 int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int_16<0x1D, "pabsw",
                                 int_x86_ssse3_pabs_w,
                                 int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int_32<0x1E, "pabsd",
                                 int_x86_ssse3_pabs_d,
                                 int_x86_ssse3_pabs_d_128>;

/// SS3I_binop_rm_int_8 - Simple SSSE3 binary operator whose type is v*i8.
let Constraints = "$src1 = $dst" in {
  multiclass SS3I_binop_rm_int_8<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId64, Intrinsic IntId128,
                                 bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv8i8 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                      OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
  }
}

/// SS3I_binop_rm_int_16 - Simple SSSE3 binary operator whose type is v*i16.
let Constraints = "$src1 = $dst" in {
  multiclass SS3I_binop_rm_int_16<bits<8> opc, string OpcodeStr,
                                  Intrinsic IntId64, Intrinsic IntId128,
                                  bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv4i16 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                      OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv8i16 addr:$src2))))]>, OpSize;
  }
}

/// SS3I_binop_rm_int_32 - Simple SSSE3 binary operator whose type is v*i32.
let Constraints = "$src1 = $dst" in {
  multiclass SS3I_binop_rm_int_32<bits<8> opc, string OpcodeStr,
                                  Intrinsic IntId64, Intrinsic IntId128,
                                  bit Commutable = 0> {
    def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst),
                     (ins VR64:$src1, VR64:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst),
                     (ins VR64:$src1, i64mem:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     [(set VR64:$dst,
                       (IntId64 VR64:$src1,
                        (bitconvert (memopv2i32 addr:$src2))))]>;

    def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                      OpSize {
      let isCommutable = Commutable;
    }
    def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, i128mem:$src2),
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      [(set VR128:$dst,
                        (IntId128 VR128:$src1,
                         (bitconvert (memopv4i32 addr:$src2))))]>, OpSize;
  }
}

defm PHADDW    : SS3I_binop_rm_int_16<0x01, "phaddw",
                                      int_x86_ssse3_phadd_w,
                                      int_x86_ssse3_phadd_w_128, 1>;
defm PHADDD    : SS3I_binop_rm_int_32<0x02, "phaddd",
                                      int_x86_ssse3_phadd_d,
                                      int_x86_ssse3_phadd_d_128, 1>;
defm PHADDSW   : SS3I_binop_rm_int_16<0x03, "phaddsw",
                                      int_x86_ssse3_phadd_sw,
                                      int_x86_ssse3_phadd_sw_128, 1>;
defm PHSUBW    : SS3I_binop_rm_int_16<0x05, "phsubw",
                                      int_x86_ssse3_phsub_w,
                                      int_x86_ssse3_phsub_w_128>;
defm PHSUBD    : SS3I_binop_rm_int_32<0x06, "phsubd",
                                      int_x86_ssse3_phsub_d,
                                      int_x86_ssse3_phsub_d_128>;
defm PHSUBSW   : SS3I_binop_rm_int_16<0x07, "phsubsw",
                                      int_x86_ssse3_phsub_sw,
                                      int_x86_ssse3_phsub_sw_128>;
defm PMADDUBSW : SS3I_binop_rm_int_8 <0x04, "pmaddubsw",
                                      int_x86_ssse3_pmadd_ub_sw,
                                      int_x86_ssse3_pmadd_ub_sw_128, 1>;
defm PMULHRSW  : SS3I_binop_rm_int_16<0x0B, "pmulhrsw",
                                      int_x86_ssse3_pmul_hr_sw,
                                      int_x86_ssse3_pmul_hr_sw_128, 1>;
defm PSHUFB    : SS3I_binop_rm_int_8 <0x00, "pshufb",
                                      int_x86_ssse3_pshuf_b,
                                      int_x86_ssse3_pshuf_b_128>;
defm PSIGNB    : SS3I_binop_rm_int_8 <0x08, "psignb",
                                      int_x86_ssse3_psign_b,
                                      int_x86_ssse3_psign_b_128>;
defm PSIGNW    : SS3I_binop_rm_int_16<0x09, "psignw",
                                      int_x86_ssse3_psign_w,
                                      int_x86_ssse3_psign_w_128>;
defm PSIGND    : SS3I_binop_rm_int_32<0x0A, "psignd",
                                      int_x86_ssse3_psign_d,
                                      int_x86_ssse3_psign_d_128>;
2742
Evan Cheng3ea4d672008-03-05 08:19:16 +00002743let Constraints = "$src1 = $dst" in {
Bill Wendling1dc817c2007-08-10 09:00:17 +00002744 def PALIGNR64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
2745 (ins VR64:$src1, VR64:$src2, i16imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002746 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002747 [(set VR64:$dst,
2748 (int_x86_ssse3_palign_r
2749 VR64:$src1, VR64:$src2,
2750 imm:$src3))]>;
2751 def PALIGNR64rm : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
2752 (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002753 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002754 [(set VR64:$dst,
2755 (int_x86_ssse3_palign_r
2756 VR64:$src1,
2757 (bitconvert (memopv2i32 addr:$src2)),
2758 imm:$src3))]>;
Bill Wendling98680292007-08-10 06:22:27 +00002759
Bill Wendling1dc817c2007-08-10 09:00:17 +00002760 def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
2761 (ins VR128:$src1, VR128:$src2, i32imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002762 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002763 [(set VR128:$dst,
2764 (int_x86_ssse3_palign_r_128
2765 VR128:$src1, VR128:$src2,
2766 imm:$src3))]>, OpSize;
2767 def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
2768 (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
Dale Johannesen576b27e2007-10-11 20:58:37 +00002769 "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
Bill Wendling1dc817c2007-08-10 09:00:17 +00002770 [(set VR128:$dst,
2771 (int_x86_ssse3_palign_r_128
2772 VR128:$src1,
2773 (bitconvert (memopv4i32 addr:$src2)),
2774 imm:$src3))]>, OpSize;
Bill Wendling98680292007-08-10 06:22:27 +00002775}
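// Illustrative use of the PALIGNR defs above, assuming <tmmintrin.h> and an
// SSSE3-enabled compile; the byte count must be a compile-time constant so it
// can become the $src3 immediate:
//
//   #include <tmmintrin.h>
//
//   __m128i concat_shift(__m128i hi, __m128i lo) {
//     // bytes 5..20 of the 32-byte concatenation {hi:lo} -> palignr $5
//     return _mm_alignr_epi8(hi, lo, 5);
//   }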
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002776
2777//===----------------------------------------------------------------------===//
2778// Non-Instruction Patterns
2779//===----------------------------------------------------------------------===//
2780
Chris Lattnerdec9cb52008-01-24 08:07:48 +00002781// extload f32 -> f64. This matches load+fextend because we have a hack in
2782// the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
2783// Since these loads aren't folded into the fextend, we have to match it
2784// explicitly here.
2785let Predicates = [HasSSE2] in
2786 def : Pat<(fextend (loadf32 addr:$src)),
2787 (CVTSS2SDrm addr:$src)>;
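// A plain C sketch of the case this pattern covers (no intrinsics needed):
// a float loaded from memory and widened to double becomes fextend(loadf32),
// which the pattern folds into a single cvtss2sd with a memory operand when
// compiling for SSE2.
//
//   double widen(const float *p) {
//     return *p;   // selects to CVTSS2SDrm: cvtss2sd (mem), %xmm
//   }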
2788
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002789// bit_convert
2790let Predicates = [HasSSE2] in {
2791 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
2792 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
2793 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
2794 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
2795 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
2796 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
2797 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
2798 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
2799 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
2800 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
2801 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
2802 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
2803 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
2804 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
2805 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
2806 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
2807 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
2808 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
2809 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
2810 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
2811 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
2812 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
2813 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
2814 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
2815 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
2816 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
2817 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
2818 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
2819 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
2820 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
2821}
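// These bitconverts are free at run time; at the source level they correspond
// to the SSE cast intrinsics, e.g. (a sketch assuming <emmintrin.h>, SSE2):
//
//   #include <emmintrin.h>
//
//   __m128i as_int(__m128 v)  { return _mm_castps_si128(v); }  // no instruction emitted
//   __m128  as_f32(__m128i v) { return _mm_castsi128_ps(v); }
//   __m128d as_f64(__m128 v)  { return _mm_castps_pd(v); }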
2822
2823// Move a scalar into the low element of an XMM register, zeroing the upper
2824// elements; movd to an XMM register zero-extends in the same way.
2825let AddedComplexity = 15 in {
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002826// Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
Evan Chenge9b9c672008-05-09 21:53:03 +00002827def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002828 (MOVLSD2PDrr (V_SET0), FR64:$src)>, Requires<[HasSSE2]>;
Evan Chenge9b9c672008-05-09 21:53:03 +00002829def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002830 (MOVLSS2PSrr (V_SET0), FR32:$src)>, Requires<[HasSSE2]>;
Evan Chenge259e872008-05-09 23:37:55 +00002831def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
2832 (MOVLPSrr (V_SET0), VR128:$src)>, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002833}
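// Source-level sketch (assuming <emmintrin.h>, which also pulls in the SSE1
// intrinsics): building a vector whose low element is a scalar and whose upper
// elements are zero is exactly the zero-register plus MOVS{S|D} idiom matched
// above.
//
//   #include <emmintrin.h>
//
//   __m128  low_f32(float x)  { return _mm_set_ss(x); }  // typically xorps + movss
//   __m128d low_f64(double x) { return _mm_set_sd(x); }  // typically xorps + movsd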
2834
2835// Splat v2f64 / v2i64
2836let AddedComplexity = 10 in {
2837def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
2838 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2839def : Pat<(vector_shuffle (v2f64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
2840 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2841def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), SSE_splat_lo_mask:$sm),
2842 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2843def : Pat<(vector_shuffle (v2i64 VR128:$src), (undef), UNPCKH_shuffle_mask:$sm),
2844 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2845}
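// Splatting one double across both lanes is the unpack-with-self form matched
// above; a sketch assuming <emmintrin.h>:
//
//   #include <emmintrin.h>
//
//   __m128d splat_lo(__m128d v) { return _mm_unpacklo_pd(v, v); }  // unpcklpd %xmm, %xmm
//   __m128d splat_hi(__m128d v) { return _mm_unpackhi_pd(v, v); }  // unpckhpd %xmm, %xmm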
2846
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002847// Special unary SHUFPSrri case.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002848def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
2849 SHUFP_unary_shuffle_mask:$sm)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002850 (SHUFPSrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2851 Requires<[HasSSE1]>;
Dan Gohman7dc19012007-08-02 21:17:01 +00002852// Special unary SHUFPDrri case.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002853def : Pat<(v2f64 (vector_shuffle VR128:$src1, (undef),
2854 SHUFP_unary_shuffle_mask:$sm)),
Dan Gohman7dc19012007-08-02 21:17:01 +00002855 (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2856 Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002857// Unary v4f32 shuffle with PSHUF* in order to fold a load.
Evan Chengbf8b2c52008-04-05 00:30:36 +00002858def : Pat<(vector_shuffle (bc_v4i32 (memopv4f32 addr:$src1)), (undef),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002859 SHUFP_unary_shuffle_mask:$sm),
2860 (PSHUFDmi addr:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2861 Requires<[HasSSE2]>;
2862// Special binary v4i32 shuffle cases with SHUFPS.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002863def : Pat<(v4i32 (vector_shuffle VR128:$src1, (v4i32 VR128:$src2),
2864 PSHUFD_binary_shuffle_mask:$sm)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002865 (SHUFPSrri VR128:$src1, VR128:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
2866 Requires<[HasSSE2]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002867def : Pat<(v4i32 (vector_shuffle VR128:$src1,
2868 (bc_v4i32 (memopv2i64 addr:$src2)), PSHUFD_binary_shuffle_mask:$sm)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002869 (SHUFPSrmi VR128:$src1, addr:$src2, PSHUFD_binary_shuffle_mask:$sm)>,
2870 Requires<[HasSSE2]>;
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002871// Special binary v2i64 shuffle cases using SHUFPDrri.
2872def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2873 SHUFP_shuffle_mask:$sm)),
2874 (SHUFPDrri VR128:$src1, VR128:$src2, SHUFP_shuffle_mask:$sm)>,
2875 Requires<[HasSSE2]>;
2876// Special unary SHUFPDrri case.
2877def : Pat<(v2i64 (vector_shuffle VR128:$src1, (undef),
2878 SHUFP_unary_shuffle_mask:$sm)),
2879 (SHUFPDrri VR128:$src1, VR128:$src1, SHUFP_unary_shuffle_mask:$sm)>,
2880 Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002881
2882// vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
2883let AddedComplexity = 10 in {
2884def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
2885 UNPCKL_v_undef_shuffle_mask)),
2886 (UNPCKLPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
2887def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
2888 UNPCKL_v_undef_shuffle_mask)),
2889 (PUNPCKLBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2890def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
2891 UNPCKL_v_undef_shuffle_mask)),
2892 (PUNPCKLWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2893def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2894 UNPCKL_v_undef_shuffle_mask)),
2895 (PUNPCKLDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2896}
2897
2898// vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
2899let AddedComplexity = 10 in {
2900def : Pat<(v4f32 (vector_shuffle VR128:$src, (undef),
2901 UNPCKH_v_undef_shuffle_mask)),
2902 (UNPCKHPSrr VR128:$src, VR128:$src)>, Requires<[HasSSE1]>;
2903def : Pat<(v16i8 (vector_shuffle VR128:$src, (undef),
2904 UNPCKH_v_undef_shuffle_mask)),
2905 (PUNPCKHBWrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2906def : Pat<(v8i16 (vector_shuffle VR128:$src, (undef),
2907 UNPCKH_v_undef_shuffle_mask)),
2908 (PUNPCKHWDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2909def : Pat<(v4i32 (vector_shuffle VR128:$src, (undef),
2910 UNPCKH_v_undef_shuffle_mask)),
2911 (PUNPCKHDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
2912}
2913
2914let AddedComplexity = 15 in {
2915// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
2916def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2917 MOVHP_shuffle_mask)),
2918 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
2919
2920// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
2921def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2922 MOVHLPS_shuffle_mask)),
2923 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
2924
2925// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
2926def : Pat<(v4f32 (vector_shuffle VR128:$src1, (undef),
2927 MOVHLPS_v_undef_shuffle_mask)),
2928 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
2929def : Pat<(v4i32 (vector_shuffle VR128:$src1, (undef),
2930 MOVHLPS_v_undef_shuffle_mask)),
2931 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
2932}
2933
2934let AddedComplexity = 20 in {
2935// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
2936// vector_shuffle v1, (load v2) <0, 1, 4, 5> using MOVHPS
Dan Gohman4a4f1512007-07-18 20:23:34 +00002937def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002938 MOVLP_shuffle_mask)),
2939 (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
Dan Gohman4a4f1512007-07-18 20:23:34 +00002940def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002941 MOVLP_shuffle_mask)),
2942 (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
Dan Gohman4a4f1512007-07-18 20:23:34 +00002943def : Pat<(v4f32 (vector_shuffle VR128:$src1, (memopv4f32 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002944 MOVHP_shuffle_mask)),
2945 (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
Dan Gohman4a4f1512007-07-18 20:23:34 +00002946def : Pat<(v2f64 (vector_shuffle VR128:$src1, (memopv2f64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002947 MOVHP_shuffle_mask)),
2948 (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2949
Dan Gohman4a4f1512007-07-18 20:23:34 +00002950def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002951 MOVLP_shuffle_mask)),
2952 (MOVLPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
Dan Gohman4a4f1512007-07-18 20:23:34 +00002953def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002954 MOVLP_shuffle_mask)),
2955 (MOVLPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
Dan Gohman4a4f1512007-07-18 20:23:34 +00002956def : Pat<(v4i32 (vector_shuffle VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002957 MOVHP_shuffle_mask)),
2958 (MOVHPSrm VR128:$src1, addr:$src2)>, Requires<[HasSSE1]>;
Dan Gohman4a4f1512007-07-18 20:23:34 +00002959def : Pat<(v2i64 (vector_shuffle VR128:$src1, (memopv2i64 addr:$src2),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002960 MOVHP_shuffle_mask)),
2961 (MOVHPDrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
2962}
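// The load-folding patterns above correspond to the half-vector load
// intrinsics; a sketch assuming <emmintrin.h> (SSE2):
//
//   #include <emmintrin.h>
//
//   __m128d set_low(__m128d v, const double *p)  { return _mm_loadl_pd(v, p); }  // movlpd (mem), %xmm
//   __m128d set_high(__m128d v, const double *p) { return _mm_loadh_pd(v, p); }  // movhpd (mem), %xmm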
2963
2964let AddedComplexity = 15 in {
2965// Setting the lowest element in the vector.
2966def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2967 MOVL_shuffle_mask)),
2968 (MOVLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2969def : Pat<(v2i64 (vector_shuffle VR128:$src1, VR128:$src2,
2970 MOVL_shuffle_mask)),
2971 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2972
2973// vector_shuffle v1, v2 <4, 5, 2, 3> using MOVLPDrr (movsd)
2974def : Pat<(v4f32 (vector_shuffle VR128:$src1, VR128:$src2,
2975 MOVLP_shuffle_mask)),
2976 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2977def : Pat<(v4i32 (vector_shuffle VR128:$src1, VR128:$src2,
2978 MOVLP_shuffle_mask)),
2979 (MOVLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
2980}
2981
2982// Set lowest element and zero upper elements.
Evan Cheng15e8f5a2007-12-15 03:00:47 +00002983let AddedComplexity = 15 in
2984def : Pat<(v2f64 (vector_shuffle immAllZerosV_bc, VR128:$src,
2985 MOVL_shuffle_mask)),
2986 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
Evan Chenge9b9c672008-05-09 21:53:03 +00002987def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
Evan Chengd09a8a02008-05-08 22:35:02 +00002988 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00002989
2990// FIXME: Temporary workaround since 2-wide shuffle is broken.
2991def : Pat<(int_x86_sse2_movs_d VR128:$src1, VR128:$src2),
2992 (v2f64 (MOVLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
2993def : Pat<(int_x86_sse2_loadh_pd VR128:$src1, addr:$src2),
2994 (v2f64 (MOVHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
2995def : Pat<(int_x86_sse2_loadl_pd VR128:$src1, addr:$src2),
2996 (v2f64 (MOVLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
2997def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, VR128:$src2, imm:$src3),
2998 (v2f64 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$src3))>,
2999 Requires<[HasSSE2]>;
3000def : Pat<(int_x86_sse2_shuf_pd VR128:$src1, (load addr:$src2), imm:$src3),
3001 (v2f64 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$src3))>,
3002 Requires<[HasSSE2]>;
3003def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, VR128:$src2),
3004 (v2f64 (UNPCKHPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
3005def : Pat<(int_x86_sse2_unpckh_pd VR128:$src1, (load addr:$src2)),
3006 (v2f64 (UNPCKHPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
3007def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, VR128:$src2),
3008 (v2f64 (UNPCKLPDrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
3009def : Pat<(int_x86_sse2_unpckl_pd VR128:$src1, (load addr:$src2)),
3010 (v2f64 (UNPCKLPDrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
3011def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, VR128:$src2),
3012 (v2i64 (PUNPCKHQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
3013def : Pat<(int_x86_sse2_punpckh_qdq VR128:$src1, (load addr:$src2)),
3014 (v2i64 (PUNPCKHQDQrm VR128:$src1, addr:$src2))>, Requires<[HasSSE2]>;
3015def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, VR128:$src2),
3016 (v2i64 (PUNPCKLQDQrr VR128:$src1, VR128:$src2))>, Requires<[HasSSE2]>;
3017def : Pat<(int_x86_sse2_punpckl_qdq VR128:$src1, (load addr:$src2)),
3018 (PUNPCKLQDQrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3019
3020// Some special case pandn patterns.
3021def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3022 VR128:$src2)),
3023 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3024def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3025 VR128:$src2)),
3026 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3027def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3028 VR128:$src2)),
3029 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3030
3031def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
Dan Gohman7dc19012007-08-02 21:17:01 +00003032 (memopv2i64 addr:$src2))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003033 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3034def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
Dan Gohman7dc19012007-08-02 21:17:01 +00003035 (memopv2i64 addr:$src2))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003036 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3037def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
Dan Gohman7dc19012007-08-02 21:17:01 +00003038 (memopv2i64 addr:$src2))),
Dan Gohmanf17a25c2007-07-18 16:29:46 +00003039 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
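// The (and (xor x, all-ones), y) patterns above are what the andnot intrinsic
// produces; a sketch assuming <emmintrin.h>:
//
//   #include <emmintrin.h>
//
//   __m128i select_bits(__m128i mask, __m128i v) {
//     return _mm_andnot_si128(mask, v);  // pandn: (~mask) & v in one instruction
//   }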
3040
Nate Begeman78246ca2007-11-17 03:58:34 +00003041// vector -> vector casts
3042def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3043 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3044def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3045 (Int_CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3046
Evan Cheng51a49b22007-07-20 00:27:43 +00003047// Use movaps / movups for SSE integer load / store (one byte shorter).
Dan Gohman11821702007-07-27 17:16:43 +00003048def : Pat<(alignedloadv4i32 addr:$src),
3049 (MOVAPSrm addr:$src)>, Requires<[HasSSE1]>;
3050def : Pat<(loadv4i32 addr:$src),
3051 (MOVUPSrm addr:$src)>, Requires<[HasSSE1]>;
Evan Cheng51a49b22007-07-20 00:27:43 +00003052def : Pat<(alignedloadv2i64 addr:$src),
3053 (MOVAPSrm addr:$src)>, Requires<[HasSSE2]>;
3054def : Pat<(loadv2i64 addr:$src),
3055 (MOVUPSrm addr:$src)>, Requires<[HasSSE2]>;
3056
3057def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3058 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3059def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3060 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3061def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3062 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3063def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3064 (MOVAPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3065def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3066 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3067def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3068 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3069def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3070 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
3071def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3072 (MOVUPSmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>;
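// Effect of the patterns above, sketched with <emmintrin.h>: integer-vector
// loads and stores are emitted as movaps/movups, which have the same behavior
// as movdqa/movdqu but a one-byte-shorter encoding.
//
//   #include <emmintrin.h>
//
//   __m128i load_aligned(const __m128i *p) { return _mm_load_si128(p); }              // movaps
//   __m128i load_unaligned(const void *p)  { return _mm_loadu_si128((const __m128i *)p); }  // movups
//   void store_aligned(__m128i *p, __m128i v) { _mm_store_si128(p, v); }              // movaps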
Nate Begemanb2975562008-02-03 07:18:54 +00003073
3074//===----------------------------------------------------------------------===//
3075// SSE4.1 Instructions
3076//===----------------------------------------------------------------------===//
3077
Nate Begemanb2975562008-02-03 07:18:54 +00003078multiclass sse41_fp_unop_rm<bits<8> opcss, bits<8> opcps,
3079 bits<8> opcsd, bits<8> opcpd,
3080 string OpcodeStr,
3081 Intrinsic F32Int,
3082 Intrinsic V4F32Int,
3083 Intrinsic F64Int,
Nate Begemaneb3f5432008-02-04 05:34:34 +00003084 Intrinsic V2F64Int> {
Nate Begemanb2975562008-02-03 07:18:54 +00003085 // Intrinsic operation, reg.
Evan Cheng78d00612008-03-14 07:39:27 +00003086 def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
Nate Begeman72d802a2008-02-04 06:00:24 +00003087 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003088 !strconcat(OpcodeStr,
3089 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003090 [(set VR128:$dst, (F32Int VR128:$src1, imm:$src2))]>,
3091 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003092
3093 // Intrinsic operation, mem.
Evan Cheng78d00612008-03-14 07:39:27 +00003094 def SSm_Int : SS4AIi8<opcss, MRMSrcMem,
Nate Begeman72d802a2008-02-04 06:00:24 +00003095 (outs VR128:$dst), (ins ssmem:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003096 !strconcat(OpcodeStr,
3097 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003098 [(set VR128:$dst, (F32Int sse_load_f32:$src1, imm:$src2))]>,
3099 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003100
3101 // Vector intrinsic operation, reg
Evan Cheng78d00612008-03-14 07:39:27 +00003102 def PSr_Int : SS4AIi8<opcps, MRMSrcReg,
Nate Begeman72d802a2008-02-04 06:00:24 +00003103 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003104 !strconcat(OpcodeStr,
3105 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003106 [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>,
3107 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003108
3109 // Vector intrinsic operation, mem
Evan Cheng78d00612008-03-14 07:39:27 +00003110 def PSm_Int : SS4AIi8<opcps, MRMSrcMem,
Nate Begeman72d802a2008-02-04 06:00:24 +00003111 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003112 !strconcat(OpcodeStr,
3113 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003114 [(set VR128:$dst, (V4F32Int (load addr:$src1),imm:$src2))]>,
3115 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003116
3117 // Intrinsic operation, reg.
Evan Cheng78d00612008-03-14 07:39:27 +00003118 def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
Nate Begeman72d802a2008-02-04 06:00:24 +00003119 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003120 !strconcat(OpcodeStr,
3121 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003122 [(set VR128:$dst, (F64Int VR128:$src1, imm:$src2))]>,
3123 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003124
3125 // Intrinsic operation, mem.
Evan Cheng78d00612008-03-14 07:39:27 +00003126 def SDm_Int : SS4AIi8<opcsd, MRMSrcMem,
Nate Begeman72d802a2008-02-04 06:00:24 +00003127 (outs VR128:$dst), (ins sdmem:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003128 !strconcat(OpcodeStr,
3129 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003130 [(set VR128:$dst, (F64Int sse_load_f64:$src1, imm:$src2))]>,
3131 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003132
3133 // Vector intrinsic operation, reg
Evan Cheng78d00612008-03-14 07:39:27 +00003134 def PDr_Int : SS4AIi8<opcpd, MRMSrcReg,
Nate Begeman72d802a2008-02-04 06:00:24 +00003135 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003136 !strconcat(OpcodeStr,
3137 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003138 [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>,
3139 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003140
3141 // Vector intrinsic operation, mem
Evan Cheng78d00612008-03-14 07:39:27 +00003142 def PDm_Int : SS4AIi8<opcpd, MRMSrcMem,
Nate Begeman72d802a2008-02-04 06:00:24 +00003143 (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2),
Nate Begemanb2975562008-02-03 07:18:54 +00003144 !strconcat(OpcodeStr,
3145 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemaneb3f5432008-02-04 05:34:34 +00003146 [(set VR128:$dst, (V2F64Int (load addr:$src1),imm:$src2))]>,
3147 OpSize;
Nate Begemanb2975562008-02-03 07:18:54 +00003148}
3149
3150// FP round - roundss, roundps, roundsd, roundpd
3151defm ROUND : sse41_fp_unop_rm<0x0A, 0x08, 0x0B, 0x09, "round",
3152 int_x86_sse41_round_ss, int_x86_sse41_round_ps,
3153 int_x86_sse41_round_sd, int_x86_sse41_round_pd>;
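// C-level sketch of the rounding defs above, assuming <smmintrin.h> and an
// SSE4.1-enabled compile; the rounding-control value becomes the immediate
// operand:
//
//   #include <smmintrin.h>
//
//   __m128 round_nearest(__m128 v) {
//     return _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);  // roundps $8
//   }
//   __m128d round_down(__m128d v) {
//     return _mm_round_pd(v, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);      // roundpd $9
//   }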
Nate Begemaneb3f5432008-02-04 05:34:34 +00003154
3155// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
3156multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
3157 Intrinsic IntId128> {
3158 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3159 (ins VR128:$src),
3160 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3161 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
3162 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3163 (ins i128mem:$src),
3164 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3165 [(set VR128:$dst,
3166 (IntId128
3167 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
3168}
3169
3170defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
3171 int_x86_sse41_phminposuw>;
3172
3173/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
Evan Cheng3ea4d672008-03-05 08:19:16 +00003174let Constraints = "$src1 = $dst" in {
Nate Begemaneb3f5432008-02-04 05:34:34 +00003175 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
3176 Intrinsic IntId128, bit Commutable = 0> {
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003177 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3178 (ins VR128:$src1, VR128:$src2),
3179 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3180 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3181 OpSize {
Nate Begemaneb3f5432008-02-04 05:34:34 +00003182 let isCommutable = Commutable;
3183 }
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003184 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3185 (ins VR128:$src1, i128mem:$src2),
3186 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3187 [(set VR128:$dst,
3188 (IntId128 VR128:$src1,
3189 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
Nate Begemaneb3f5432008-02-04 05:34:34 +00003190 }
3191}
3192
3193defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq",
3194 int_x86_sse41_pcmpeqq, 1>;
3195defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw",
3196 int_x86_sse41_packusdw, 0>;
3197defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb",
3198 int_x86_sse41_pminsb, 1>;
3199defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd",
3200 int_x86_sse41_pminsd, 1>;
3201defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud",
3202 int_x86_sse41_pminud, 1>;
3203defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw",
3204 int_x86_sse41_pminuw, 1>;
3205defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb",
3206 int_x86_sse41_pmaxsb, 1>;
3207defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd",
3208 int_x86_sse41_pmaxsd, 1>;
3209defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud",
3210 int_x86_sse41_pmaxud, 1>;
3211defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw",
3212 int_x86_sse41_pmaxuw, 1>;
Nate Begemaneb3f5432008-02-04 05:34:34 +00003213defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq",
3214 int_x86_sse41_pmuldq, 1>;
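// Typical <smmintrin.h> entry points for the defs above (SSE4.1-enabled
// compile assumed; wrapper names illustrative):
//
//   #include <smmintrin.h>
//
//   __m128i min8(__m128i a, __m128i b)     { return _mm_min_epi8(a, b); }      // pminsb
//   __m128i pack32(__m128i a, __m128i b)   { return _mm_packus_epi32(a, b); }  // packusdw
//   __m128i eq64(__m128i a, __m128i b)     { return _mm_cmpeq_epi64(a, b); }   // pcmpeqq
//   __m128i mul32x32(__m128i a, __m128i b) { return _mm_mul_epi32(a, b); }     // pmuldq: signed 32x32->64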
Nate Begeman72d802a2008-02-04 06:00:24 +00003215
Nate Begeman58057962008-02-09 01:38:08 +00003216
3217/// SS41I_binop_patint - SSE 4.1 binary operator with both a plain pattern
3217/// form and an intrinsic form
Evan Cheng3ea4d672008-03-05 08:19:16 +00003218let Constraints = "$src1 = $dst" in {
Nate Begeman58057962008-02-09 01:38:08 +00003219 multiclass SS41I_binop_patint<bits<8> opc, string OpcodeStr, SDNode OpNode,
3220 Intrinsic IntId128, bit Commutable = 0> {
3221 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3222 (ins VR128:$src1, VR128:$src2),
3223 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3224 [(set VR128:$dst, (OpNode (v4i32 VR128:$src1),
3225 VR128:$src2))]>, OpSize {
3226 let isCommutable = Commutable;
3227 }
3228 def rr_int : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3229 (ins VR128:$src1, VR128:$src2),
3230 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3231 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3232 OpSize {
3233 let isCommutable = Commutable;
3234 }
3235 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3236 (ins VR128:$src1, i128mem:$src2),
3237 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3238 [(set VR128:$dst,
3239 (OpNode VR128:$src1, (memopv4i32 addr:$src2)))]>, OpSize;
3240 def rm_int : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3241 (ins VR128:$src1, i128mem:$src2),
3242 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3243 [(set VR128:$dst,
3244 (IntId128 VR128:$src1, (memopv4i32 addr:$src2)))]>,
3245 OpSize;
3246 }
3247}
3248defm PMULLD : SS41I_binop_patint<0x40, "pmulld", mul,
3249 int_x86_sse41_pmulld, 1>;
3250
3251
Evan Cheng78d00612008-03-14 07:39:27 +00003252/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
Evan Cheng3ea4d672008-03-05 08:19:16 +00003253let Constraints = "$src1 = $dst" in {
Nate Begeman72d802a2008-02-04 06:00:24 +00003254 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
3255 Intrinsic IntId128, bit Commutable = 0> {
Evan Cheng78d00612008-03-14 07:39:27 +00003256 def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003257 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
3258 !strconcat(OpcodeStr,
Nate Begemanb4e9a042008-02-10 18:47:57 +00003259 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003260 [(set VR128:$dst,
3261 (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>,
3262 OpSize {
Nate Begeman72d802a2008-02-04 06:00:24 +00003263 let isCommutable = Commutable;
3264 }
Evan Cheng78d00612008-03-14 07:39:27 +00003265 def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003266 (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3),
3267 !strconcat(OpcodeStr,
Nate Begemanb4e9a042008-02-10 18:47:57 +00003268 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003269 [(set VR128:$dst,
3270 (IntId128 VR128:$src1,
3271 (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>,
3272 OpSize;
Nate Begeman72d802a2008-02-04 06:00:24 +00003273 }
3274}
3275
3276defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps",
3277 int_x86_sse41_blendps, 0>;
3278defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd",
3279 int_x86_sse41_blendpd, 0>;
3280defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw",
3281 int_x86_sse41_pblendw, 0>;
3282defm DPPS : SS41I_binop_rmi_int<0x40, "dpps",
3283 int_x86_sse41_dpps, 1>;
3284defm DPPD : SS41I_binop_rmi_int<0x41, "dppd",
3285 int_x86_sse41_dppd, 1>;
3286defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw",
3287 int_x86_sse41_mpsadbw, 0>;
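// The 8-bit immediate in these defs is the control byte of the corresponding
// <smmintrin.h> intrinsics (SSE4.1-enabled compile assumed):
//
//   #include <smmintrin.h>
//
//   __m128 mix(__m128 a, __m128 b)    { return _mm_blend_ps(a, b, 0x5); }   // blendps $5: b in lanes 0,2
//   __m128 dot4(__m128 a, __m128 b)   { return _mm_dp_ps(a, b, 0xF1); }     // dpps $0xf1: sum all, write lane 0
//   __m128i sad(__m128i a, __m128i b) { return _mm_mpsadbw_epu8(a, b, 0); } // mpsadbw $0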
Nate Begeman58057962008-02-09 01:38:08 +00003288
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003289
Evan Cheng78d00612008-03-14 07:39:27 +00003290/// SS41I_ternary_int - SSE 4.1 ternary operator
Evan Cheng3ea4d672008-03-05 08:19:16 +00003291let Uses = [XMM0], Constraints = "$src1 = $dst" in {
Nate Begemanb4e9a042008-02-10 18:47:57 +00003292 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3293 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
3294 (ins VR128:$src1, VR128:$src2),
3295 !strconcat(OpcodeStr,
3296 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
3297 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
3298 OpSize;
3299
3300 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
3301 (ins VR128:$src1, i128mem:$src2),
3302 !strconcat(OpcodeStr,
3303 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
3304 [(set VR128:$dst,
3305 (IntId VR128:$src1,
3306 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
3307 }
3308}
3309
3310defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
3311defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
3312defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
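// The implicit XMM0 use above reflects how the variable-blend instructions are
// encoded: the mask operand must live in xmm0. A sketch assuming
// <smmintrin.h>:
//
//   #include <smmintrin.h>
//
//   __m128 select(__m128 a, __m128 b, __m128 mask) {
//     return _mm_blendv_ps(a, b, mask);  // blendvps: sign bit of each mask lane picks b over a
//   }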
3313
3314
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003315multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3316 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3317 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3318 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3319
3320 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3321 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3322 [(set VR128:$dst,
3323 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3324}
3325
3326defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3327defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3328defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3329defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3330defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3331defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
3332
3333multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3334 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3335 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3336 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3337
3338 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3339 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3340 [(set VR128:$dst,
3341 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3342}
3343
3344defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
3345defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
3346defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
3347defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
3348
3349multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3350 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3351 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3352 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3353
3354 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
3355 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3356 [(set VR128:$dst,
3357 (IntId (bitconvert (v4i32 (load addr:$src)))))]>, OpSize;
3358}
3359
3360defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
3361defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
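// <smmintrin.h> sketch of the sign/zero-extending moves above (SSE4.1-enabled
// compile assumed); only the low lanes of the source are consumed, which is
// why the memory forms above load only 64/32/16 bits:
//
//   #include <smmintrin.h>
//
//   __m128i widen_s8to16(__m128i v)  { return _mm_cvtepi8_epi16(v); }   // pmovsxbw
//   __m128i widen_u8to32(__m128i v)  { return _mm_cvtepu8_epi32(v); }   // pmovzxbd
//   __m128i widen_s32to64(__m128i v) { return _mm_cvtepi32_epi64(v); }  // pmovsxdq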
3362
3363
Nate Begemand77e59e2008-02-11 04:19:36 +00003364/// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
3365multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
Evan Chengc2054be2008-03-26 08:11:49 +00003366 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003367 (ins VR128:$src1, i32i8imm:$src2),
3368 !strconcat(OpcodeStr,
3369 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemand77e59e2008-02-11 04:19:36 +00003370 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
3371 OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003372 def mr : SS4AIi8<opc, MRMDestMem, (outs),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003373 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
3374 !strconcat(OpcodeStr,
3375 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Nate Begemand77e59e2008-02-11 04:19:36 +00003376 []>, OpSize;
3377// FIXME:
3378// There's an AssertZext in the way of writing the store pattern
3379// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003380}
3381
Nate Begemand77e59e2008-02-11 04:19:36 +00003382defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003383
Nate Begemand77e59e2008-02-11 04:19:36 +00003384
3385/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
3386multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
Evan Cheng78d00612008-03-14 07:39:27 +00003387 def mr : SS4AIi8<opc, MRMDestMem, (outs),
Nate Begemand77e59e2008-02-11 04:19:36 +00003388 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
3389 !strconcat(OpcodeStr,
3390 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3391 []>, OpSize;
3392// FIXME:
3393// There's an AssertZext in the way of writing the store pattern
3394// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
3395}
3396
3397defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
3398
3399
3400/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
3401multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
Evan Chengc2054be2008-03-26 08:11:49 +00003402 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003403 (ins VR128:$src1, i32i8imm:$src2),
3404 !strconcat(OpcodeStr,
3405 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3406 [(set GR32:$dst,
3407 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003408 def mr : SS4AIi8<opc, MRMDestMem, (outs),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003409 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
3410 !strconcat(OpcodeStr,
3411 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3412 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
3413 addr:$dst)]>, OpSize;
Nate Begeman58057962008-02-09 01:38:08 +00003414}
3415
Nate Begemand77e59e2008-02-11 04:19:36 +00003416defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
Nate Begeman58057962008-02-09 01:38:08 +00003417
Nate Begemand77e59e2008-02-11 04:19:36 +00003418
Evan Cheng6c249332008-03-24 21:52:23 +00003419/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
3420/// destination
Nate Begemand77e59e2008-02-11 04:19:36 +00003421multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
Evan Chengc2054be2008-03-26 08:11:49 +00003422 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003423 (ins VR128:$src1, i32i8imm:$src2),
3424 !strconcat(OpcodeStr,
3425 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Dan Gohman788db592008-04-16 02:32:24 +00003426 [(set GR32:$dst,
3427 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
Evan Cheng6c249332008-03-24 21:52:23 +00003428 OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003429 def mr : SS4AIi8<opc, MRMDestMem, (outs),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003430 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
3431 !strconcat(OpcodeStr,
3432 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
Evan Cheng6c249332008-03-24 21:52:23 +00003433 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003434 addr:$dst)]>, OpSize;
Nate Begeman58057962008-02-09 01:38:08 +00003435}
3436
Nate Begemand77e59e2008-02-11 04:19:36 +00003437defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
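// <smmintrin.h> sketch of the extract forms above (SSE4.1-enabled compile
// assumed); the lane index must be a compile-time constant so it can become
// the immediate:
//
//   #include <smmintrin.h>
//
//   int lane8(__m128i v)  { return _mm_extract_epi8(v, 3); }   // pextrb $3 (zero-extended)
//   int lane32(__m128i v) { return _mm_extract_epi32(v, 1); }  // pextrd $1
//   int lanef(__m128 v)   { return _mm_extract_ps(v, 2); }     // extractps $2: bit pattern of lane 2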
Nate Begeman9a58b8a2008-02-09 23:46:37 +00003438
Evan Cheng3ea4d672008-03-05 08:19:16 +00003439let Constraints = "$src1 = $dst" in {
Nate Begemand77e59e2008-02-11 04:19:36 +00003440 multiclass SS41I_insert8<bits<8> opc, string OpcodeStr> {
Evan Cheng78d00612008-03-14 07:39:27 +00003441 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003442 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
3443 !strconcat(OpcodeStr,
3444 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3445 [(set VR128:$dst,
3446 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003447 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003448 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
3449 !strconcat(OpcodeStr,
3450 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3451 [(set VR128:$dst,
3452 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
3453 imm:$src3))]>, OpSize;
3454 }
3455}
3456
3457defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
3458
Evan Cheng3ea4d672008-03-05 08:19:16 +00003459let Constraints = "$src1 = $dst" in {
Nate Begemand77e59e2008-02-11 04:19:36 +00003460 multiclass SS41I_insert32<bits<8> opc, string OpcodeStr> {
Evan Cheng78d00612008-03-14 07:39:27 +00003461 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003462 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
3463 !strconcat(OpcodeStr,
3464 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3465 [(set VR128:$dst,
3466 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
3467 OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003468 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003469 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
3470 !strconcat(OpcodeStr,
3471 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3472 [(set VR128:$dst,
3473 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
3474 imm:$src3)))]>, OpSize;
3475 }
3476}
3477
3478defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
3479
Evan Cheng3ea4d672008-03-05 08:19:16 +00003480let Constraints = "$src1 = $dst" in {
Nate Begemand77e59e2008-02-11 04:19:36 +00003481 multiclass SS41I_insertf32<bits<8> opc, string OpcodeStr> {
Evan Cheng78d00612008-03-14 07:39:27 +00003482 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003483 (ins VR128:$src1, FR32:$src2, i32i8imm:$src3),
3484 !strconcat(OpcodeStr,
3485 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3486 [(set VR128:$dst,
3487 (X86insrtps VR128:$src1, FR32:$src2, imm:$src3))]>, OpSize;
Evan Cheng78d00612008-03-14 07:39:27 +00003488 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
Nate Begemand77e59e2008-02-11 04:19:36 +00003489 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
3490 !strconcat(OpcodeStr,
3491 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3492 [(set VR128:$dst,
3493 (X86insrtps VR128:$src1, (loadf32 addr:$src2),
3494 imm:$src3))]>, OpSize;
3495 }
3496}
3497
Evan Chengc2054be2008-03-26 08:11:49 +00003498defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
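// <smmintrin.h> sketch of the insert forms above (SSE4.1-enabled compile
// assumed):
//
//   #include <smmintrin.h>
//
//   __m128i put8(__m128i v, int x)   { return _mm_insert_epi8(v, x, 5); }    // pinsrb $5
//   __m128i put32(__m128i v, int x)  { return _mm_insert_epi32(v, x, 2); }   // pinsrd $2
//   __m128  putf(__m128 v, __m128 s) { return _mm_insert_ps(v, s, 0x10); }   // insertps $0x10: s[0] -> v[1]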
Nate Begeman0dd3cb52008-03-16 21:14:46 +00003499
3500let Defs = [EFLAGS] in {
3501def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
3502 "ptest\t{$src2, $src1|$src1, $src2}", []>, OpSize;
3503def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2),
3504 "ptest\t{$src2, $src1|$src1, $src2}", []>, OpSize;
3505}
3506
3507def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3508 "movntdqa\t{$src, $dst|$dst, $src}",
3509 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>;
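// <smmintrin.h> sketch for PTEST and MOVNTDQA (SSE4.1-enabled compile
// assumed):
//
//   #include <smmintrin.h>
//
//   int all_zero(__m128i v) {
//     return _mm_testz_si128(v, v);               // ptest: ZF set iff (v & v) == 0
//   }
//   __m128i stream_in(const __m128i *p) {
//     return _mm_stream_load_si128((__m128i *)p); // movntdqa: non-temporal aligned load
//   }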