//====- X86InstrMMX.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 MMX instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;

def bc_v8i8  : PatFrag<(ops node:$in), (v8i8  (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;
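// Note: load_mmx always loads the full 64 bits as a v1i64; the bc_* fragments
// above just wrap bitconvert so that a loaded v1i64 can be reinterpreted as
// the element type (v8i8, v4i16 or v2i32) an individual instruction pattern
// expects, without generating any extra code.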

//===----------------------------------------------------------------------===//
// MMX Masks
//===----------------------------------------------------------------------===//

// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUFW imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def MMX_UNPCKH_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKHMask(N);
}]>;

// Patterns for: vector_shuffle v1, v2, <0, 4, 1, 5, ...>
def MMX_UNPCKL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKLMask(N);
}]>;

// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
def MMX_UNPCKH_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKH_v_undef_Mask(N);
}]>;

// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
def MMX_UNPCKL_v_undef_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isUNPCKL_v_undef_Mask(N);
}]>;

// Patterns for the PSHUFW shuffle: any single-input shuffle of the four words.
def MMX_PSHUFW_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isPSHUFDMask(N);
}], MMX_SHUFFLE_get_shuf_imm>;
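// The PSHUFW immediate uses two bits per destination word: bits [1:0] select
// the source word for result word 0, bits [3:2] for word 1, and so on.  For
// example, a <1, 0, 3, 2> shuffle of v4i16 corresponds to the immediate
// 0xB1 (0b10110001); the xform above packs a matching build_vector mask into
// that immediate.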

// Patterns for: vector_shuffle v1, v2, <4, 5, 2, 3>; etc.
def MMX_MOVL_shuffle_mask : PatLeaf<(build_vector), [{
  return X86::isMOVLMask(N);
}]>;

//===----------------------------------------------------------------------===//
// MMX Multiclasses
//===----------------------------------------------------------------------===//

let isTwoAddress = 1 in {
  // MMXI_binop_rm - Simple MMX binary operator.
  multiclass MMXI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           ValueType OpVT, bit Commutable = 0> {
    def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst, (OpVT (OpNode VR64:$src1, VR64:$src2)))]> {
      let isCommutable = Commutable;
    }
    def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst, (OpVT (OpNode VR64:$src1,
                                          (bitconvert (load_mmx addr:$src2)))))]>;
  }

  multiclass MMXI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                               bit Commutable = 0> {
    def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]> {
      let isCommutable = Commutable;
    }
    def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst, (IntId VR64:$src1,
                                    (bitconvert (load_mmx addr:$src2))))]>;
  }

  // MMXI_binop_rm_v1i64 - Simple MMX binary operator whose type is v1i64.
  //
  // FIXME: we could eliminate this and use MMXI_binop_rm instead if tblgen knew
  // to collapse (bitconvert VT to VT) into its operand.
  //
  multiclass MMXI_binop_rm_v1i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                 bit Commutable = 0> {
    def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                  (ins VR64:$src1, VR64:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst, (v1i64 (OpNode VR64:$src1, VR64:$src2)))]> {
      let isCommutable = Commutable;
    }
    def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
                  (ins VR64:$src1, i64mem:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst,
                        (OpNode VR64:$src1, (load_mmx addr:$src2)))]>;
  }

  multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                                string OpcodeStr, Intrinsic IntId,
                                Intrinsic IntId2> {
    def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                  (ins VR64:$src1, VR64:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2))]>;
    def rm : MMXI<opc, MRMSrcMem, (outs VR64:$dst),
                  (ins VR64:$src1, i64mem:$src2),
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  [(set VR64:$dst, (IntId VR64:$src1,
                                    (bitconvert (load_mmx addr:$src2))))]>;
    def ri : MMXIi8<opc2, ImmForm, (outs VR64:$dst),
                    (ins VR64:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))]>;
  }
}
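
// Each defm that instantiates one of these multiclasses produces one def per
// member, with the member name appended to the defm name.  For example,
// defm MMX_PADDB : MMXI_binop_rm<0xFC, "paddb", add, v8i8, 1> below yields
// MMX_PADDBrr (reg, reg) and MMX_PADDBrm (reg, mem); MMXI_binop_rmi_int
// additionally yields an *ri immediate form.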

//===----------------------------------------------------------------------===//
// MMX EMMS & FEMMS Instructions
//===----------------------------------------------------------------------===//

def MMX_EMMS  : MMXI<0x77, RawFrm, (outs), (ins), "emms",  [(int_x86_mmx_emms)]>;
def MMX_FEMMS : MMXI<0x0E, RawFrm, (outs), (ins), "femms", [(int_x86_mmx_femms)]>;
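// EMMS empties the MMX state (it marks the shared x87 tag word as empty) and
// should be executed before any following x87 FP code; FEMMS is AMD's faster
// 3DNow! variant.  Both are modeled here purely through their intrinsics.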

//===----------------------------------------------------------------------===//
// MMX Scalar Instructions
//===----------------------------------------------------------------------===//

// Data Transfer Instructions
def MMX_MOVD64rr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR64:$dst, (v2i32 (scalar_to_vector GR32:$src)))]>;
let isSimpleLoad = 1, isReMaterializable = 1 in
def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR64:$dst,
                          (v2i32 (scalar_to_vector (loadi32 addr:$src))))]>;
let mayStore = 1 in
def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src),
                        "movd\t{$src, $dst|$dst, $src}", []>;

let neverHasSideEffects = 1 in
def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src),
                             "movd\t{$src, $dst|$dst, $src}", []>;

let neverHasSideEffects = 1 in
def MMX_MOVD64from64rr : MMXRI<0x7E, MRMSrcReg, (outs GR64:$dst), (ins VR64:$src),
                               "movd\t{$src, $dst|$dst, $src}", []>;

let neverHasSideEffects = 1 in
def MMX_MOVQ64rr : MMXI<0x6F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src),
                        "movq\t{$src, $dst|$dst, $src}", []>;
let isSimpleLoad = 1, isReMaterializable = 1, mayHaveSideEffects = 1 in
def MMX_MOVQ64rm : MMXI<0x6F, MRMSrcMem, (outs VR64:$dst), (ins i64mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR64:$dst, (load_mmx addr:$src))]>;
def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(store (v1i64 VR64:$src), addr:$dst)]>;

def MMX_MOVDQ2Qrr : MMXID<0xD6, MRMDestMem, (outs VR64:$dst), (ins VR128:$src),
                          "movdq2q\t{$src, $dst|$dst, $src}",
                          [(set VR64:$dst,
                            (v1i64 (bitconvert
                                    (i64 (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0))))))]>;

def MMX_MOVQ2DQrr : MMXIS<0xD6, MRMDestMem, (outs VR128:$dst), (ins VR64:$src),
                          "movq2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst,
                            (v2i64 (vector_shuffle immAllZerosV,
                                    (v2i64 (scalar_to_vector
                                            (i64 (bitconvert VR64:$src)))),
                                    MOVL_shuffle_mask)))]>;

def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
                        "movntq\t{$src, $dst|$dst, $src}",
                        [(int_x86_mmx_movnt_dq addr:$dst, VR64:$src)]>;

// movd to MMX register zero-extends
let AddedComplexity = 15 in
def MMX_MOVZDI2PDIrr : MMXI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR32:$src),
                            "movd\t{$src, $dst|$dst, $src}",
                            [(set VR64:$dst,
                              (v2i32 (X86vzmovl
                                      (v2i32 (scalar_to_vector GR32:$src)))))]>;
let AddedComplexity = 20 in
def MMX_MOVZDI2PDIrm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src),
                            "movd\t{$src, $dst|$dst, $src}",
                            [(set VR64:$dst,
                              (v2i32 (X86vzmovl
                                      (v2i32 (scalar_to_vector
                                              (loadi32 addr:$src))))))]>;
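
// The AddedComplexity values above bias instruction selection toward these
// zero-extending movd patterns over the plain MMX_MOVD64rr/rm patterns when an
// X86vzmovl node is present; patterns with larger complexity are tried first.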

// Arithmetic Instructions

// -- Addition
defm MMX_PADDB : MMXI_binop_rm<0xFC, "paddb", add, v8i8,  1>;
defm MMX_PADDW : MMXI_binop_rm<0xFD, "paddw", add, v4i16, 1>;
defm MMX_PADDD : MMXI_binop_rm<0xFE, "paddd", add, v2i32, 1>;
defm MMX_PADDQ : MMXI_binop_rm<0xD4, "paddq", add, v1i64, 1>;

defm MMX_PADDSB  : MMXI_binop_rm_int<0xEC, "paddsb" , int_x86_mmx_padds_b, 1>;
defm MMX_PADDSW  : MMXI_binop_rm_int<0xED, "paddsw" , int_x86_mmx_padds_w, 1>;

defm MMX_PADDUSB : MMXI_binop_rm_int<0xDC, "paddusb", int_x86_mmx_paddus_b, 1>;
defm MMX_PADDUSW : MMXI_binop_rm_int<0xDD, "paddusw", int_x86_mmx_paddus_w, 1>;

// -- Subtraction
defm MMX_PSUBB : MMXI_binop_rm<0xF8, "psubb", sub, v8i8>;
defm MMX_PSUBW : MMXI_binop_rm<0xF9, "psubw", sub, v4i16>;
defm MMX_PSUBD : MMXI_binop_rm<0xFA, "psubd", sub, v2i32>;
defm MMX_PSUBQ : MMXI_binop_rm<0xFB, "psubq", sub, v1i64>;

defm MMX_PSUBSB  : MMXI_binop_rm_int<0xE8, "psubsb" , int_x86_mmx_psubs_b>;
defm MMX_PSUBSW  : MMXI_binop_rm_int<0xE9, "psubsw" , int_x86_mmx_psubs_w>;

defm MMX_PSUBUSB : MMXI_binop_rm_int<0xD8, "psubusb", int_x86_mmx_psubus_b>;
defm MMX_PSUBUSW : MMXI_binop_rm_int<0xD9, "psubusw", int_x86_mmx_psubus_w>;

// -- Multiplication
defm MMX_PMULLW  : MMXI_binop_rm<0xD5, "pmullw", mul, v4i16, 1>;

defm MMX_PMULHW  : MMXI_binop_rm_int<0xE5, "pmulhw" , int_x86_mmx_pmulh_w , 1>;
defm MMX_PMULHUW : MMXI_binop_rm_int<0xE4, "pmulhuw", int_x86_mmx_pmulhu_w, 1>;
defm MMX_PMULUDQ : MMXI_binop_rm_int<0xF4, "pmuludq", int_x86_mmx_pmulu_dq, 1>;

// -- Miscellanea
defm MMX_PMADDWD : MMXI_binop_rm_int<0xF5, "pmaddwd", int_x86_mmx_pmadd_wd, 1>;

defm MMX_PAVGB   : MMXI_binop_rm_int<0xE0, "pavgb", int_x86_mmx_pavg_b, 1>;
defm MMX_PAVGW   : MMXI_binop_rm_int<0xE3, "pavgw", int_x86_mmx_pavg_w, 1>;

defm MMX_PMINUB  : MMXI_binop_rm_int<0xDA, "pminub", int_x86_mmx_pminu_b, 1>;
defm MMX_PMINSW  : MMXI_binop_rm_int<0xEA, "pminsw", int_x86_mmx_pmins_w, 1>;

defm MMX_PMAXUB  : MMXI_binop_rm_int<0xDE, "pmaxub", int_x86_mmx_pmaxu_b, 1>;
defm MMX_PMAXSW  : MMXI_binop_rm_int<0xEE, "pmaxsw", int_x86_mmx_pmaxs_w, 1>;

defm MMX_PSADBW  : MMXI_binop_rm_int<0xF6, "psadbw", int_x86_mmx_psad_bw, 1>;

// Logical Instructions
defm MMX_PAND : MMXI_binop_rm_v1i64<0xDB, "pand", and, 1>;
defm MMX_POR  : MMXI_binop_rm_v1i64<0xEB, "por" , or,  1>;
defm MMX_PXOR : MMXI_binop_rm_v1i64<0xEF, "pxor", xor, 1>;

let isTwoAddress = 1 in {
  def MMX_PANDNrr : MMXI<0xDF, MRMSrcReg,
                         (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                         "pandn\t{$src2, $dst|$dst, $src2}",
                         [(set VR64:$dst, (v1i64 (and (vnot VR64:$src1),
                                                      VR64:$src2)))]>;
  def MMX_PANDNrm : MMXI<0xDF, MRMSrcMem,
                         (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                         "pandn\t{$src2, $dst|$dst, $src2}",
                         [(set VR64:$dst, (v1i64 (and (vnot VR64:$src1),
                                                      (load addr:$src2))))]>;
}
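// PANDN complements its first operand before the AND, so it is not commutable
// and does not fit the MMXI_binop_rm multiclass; the two defs above spell out
// the (and (vnot ...)) pattern by hand.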

// Shift Instructions
defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
                                    int_x86_mmx_psrl_w, int_x86_mmx_psrli_w>;
defm MMX_PSRLD : MMXI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
                                    int_x86_mmx_psrl_d, int_x86_mmx_psrli_d>;
defm MMX_PSRLQ : MMXI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
                                    int_x86_mmx_psrl_q, int_x86_mmx_psrli_q>;

defm MMX_PSLLW : MMXI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
                                    int_x86_mmx_psll_w, int_x86_mmx_pslli_w>;
defm MMX_PSLLD : MMXI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
                                    int_x86_mmx_psll_d, int_x86_mmx_pslli_d>;
defm MMX_PSLLQ : MMXI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
                                    int_x86_mmx_psll_q, int_x86_mmx_pslli_q>;

defm MMX_PSRAW : MMXI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
                                    int_x86_mmx_psra_w, int_x86_mmx_psrai_w>;
defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
                                    int_x86_mmx_psra_d, int_x86_mmx_psrai_d>;
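
// Each shift defm above expands to three forms: *rr shifts by a count held in
// another MMX register, *rm shifts by a count loaded from memory, and *ri
// shifts by an immediate count (matched via the second, psrli/pslli/psrai
// style intrinsic).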

// Shift a 64-bit value left / right and shift in zeros.
def : Pat<(v1i64 (X86vshl VR64:$src, (i8 imm:$amt))),
          (v1i64 (MMX_PSLLQri VR64:$src, imm:$amt))>;
def : Pat<(v1i64 (X86vshr VR64:$src, (i8 imm:$amt))),
          (v1i64 (MMX_PSRLQri VR64:$src, imm:$amt))>;

// Comparison Instructions
defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
defm MMX_PCMPEQW : MMXI_binop_rm_int<0x75, "pcmpeqw", int_x86_mmx_pcmpeq_w>;
defm MMX_PCMPEQD : MMXI_binop_rm_int<0x76, "pcmpeqd", int_x86_mmx_pcmpeq_d>;

defm MMX_PCMPGTB : MMXI_binop_rm_int<0x64, "pcmpgtb", int_x86_mmx_pcmpgt_b>;
defm MMX_PCMPGTW : MMXI_binop_rm_int<0x65, "pcmpgtw", int_x86_mmx_pcmpgt_w>;
defm MMX_PCMPGTD : MMXI_binop_rm_int<0x66, "pcmpgtd", int_x86_mmx_pcmpgt_d>;

// Conversion Instructions

// -- Unpack Instructions
let isTwoAddress = 1 in {
  // Unpack High Packed Data Instructions
  def MMX_PUNPCKHBWrr : MMXI<0x68, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpckhbw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v8i8 (vector_shuffle VR64:$src1, VR64:$src2,
                                      MMX_UNPCKH_shuffle_mask)))]>;
  def MMX_PUNPCKHBWrm : MMXI<0x68, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpckhbw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v8i8 (vector_shuffle VR64:$src1,
                                      (bc_v8i8 (load_mmx addr:$src2)),
                                      MMX_UNPCKH_shuffle_mask)))]>;

  def MMX_PUNPCKHWDrr : MMXI<0x69, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpckhwd\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v4i16 (vector_shuffle VR64:$src1, VR64:$src2,
                                       MMX_UNPCKH_shuffle_mask)))]>;
  def MMX_PUNPCKHWDrm : MMXI<0x69, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpckhwd\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v4i16 (vector_shuffle VR64:$src1,
                                       (bc_v4i16 (load_mmx addr:$src2)),
                                       MMX_UNPCKH_shuffle_mask)))]>;

  def MMX_PUNPCKHDQrr : MMXI<0x6A, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpckhdq\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v2i32 (vector_shuffle VR64:$src1, VR64:$src2,
                                       MMX_UNPCKH_shuffle_mask)))]>;
  def MMX_PUNPCKHDQrm : MMXI<0x6A, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpckhdq\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v2i32 (vector_shuffle VR64:$src1,
                                       (bc_v2i32 (load_mmx addr:$src2)),
                                       MMX_UNPCKH_shuffle_mask)))]>;

  // Unpack Low Packed Data Instructions
  def MMX_PUNPCKLBWrr : MMXI<0x60, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpcklbw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v8i8 (vector_shuffle VR64:$src1, VR64:$src2,
                                      MMX_UNPCKL_shuffle_mask)))]>;
  def MMX_PUNPCKLBWrm : MMXI<0x60, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpcklbw\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v8i8 (vector_shuffle VR64:$src1,
                                      (bc_v8i8 (load_mmx addr:$src2)),
                                      MMX_UNPCKL_shuffle_mask)))]>;

  def MMX_PUNPCKLWDrr : MMXI<0x61, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpcklwd\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v4i16 (vector_shuffle VR64:$src1, VR64:$src2,
                                       MMX_UNPCKL_shuffle_mask)))]>;
  def MMX_PUNPCKLWDrm : MMXI<0x61, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpcklwd\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v4i16 (vector_shuffle VR64:$src1,
                                       (bc_v4i16 (load_mmx addr:$src2)),
                                       MMX_UNPCKL_shuffle_mask)))]>;

  def MMX_PUNPCKLDQrr : MMXI<0x62, MRMSrcReg,
                             (outs VR64:$dst), (ins VR64:$src1, VR64:$src2),
                             "punpckldq\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v2i32 (vector_shuffle VR64:$src1, VR64:$src2,
                                       MMX_UNPCKL_shuffle_mask)))]>;
  def MMX_PUNPCKLDQrm : MMXI<0x62, MRMSrcMem,
                             (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2),
                             "punpckldq\t{$src2, $dst|$dst, $src2}",
                             [(set VR64:$dst,
                               (v2i32 (vector_shuffle VR64:$src1,
                                       (bc_v2i32 (load_mmx addr:$src2)),
                                       MMX_UNPCKL_shuffle_mask)))]>;
}
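
// For reference: punpcklbw interleaves the low halves of its operands,
// producing <a0, b0, a1, b1, a2, b2, a3, b3>, and punpckhbw interleaves the
// high halves, producing <a4, b4, a5, b5, a6, b6, a7, b7> (a = $src1/$dst,
// b = $src2); the word and doubleword forms behave analogously.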

// -- Pack Instructions
defm MMX_PACKSSWB : MMXI_binop_rm_int<0x63, "packsswb", int_x86_mmx_packsswb>;
defm MMX_PACKSSDW : MMXI_binop_rm_int<0x6B, "packssdw", int_x86_mmx_packssdw>;
defm MMX_PACKUSWB : MMXI_binop_rm_int<0x67, "packuswb", int_x86_mmx_packuswb>;

// -- Shuffle Instructions
def MMX_PSHUFWri : MMXIi8<0x70, MRMSrcReg,
                          (outs VR64:$dst), (ins VR64:$src1, i8imm:$src2),
                          "pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          [(set VR64:$dst,
                            (v4i16 (vector_shuffle
                                    VR64:$src1, (undef),
                                    MMX_PSHUFW_shuffle_mask:$src2)))]>;
def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
                          (outs VR64:$dst), (ins i64mem:$src1, i8imm:$src2),
                          "pshufw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                          [(set VR64:$dst,
                            (v4i16 (vector_shuffle
                                    (bc_v4i16 (load_mmx addr:$src1)),
                                    (undef),
                                    MMX_PSHUFW_shuffle_mask:$src2)))]>;

// -- Conversion Instructions
let neverHasSideEffects = 1 in {
def MMX_CVTPD2PIrr  : MMX2I<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                            "cvtpd2pi\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTPD2PIrm  : MMX2I<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                            "cvtpd2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPI2PDrr  : MMX2I<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                            "cvtpi2pd\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTPI2PDrm  : MMX2I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                            "cvtpi2pd\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPI2PSrr  : MMXI<0x2A, MRMSrcReg, (outs VR128:$dst), (ins VR64:$src),
                           "cvtpi2ps\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTPI2PSrm  : MMXI<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                           "cvtpi2ps\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTPS2PIrr  : MMXI<0x2D, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                           "cvtps2pi\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTPS2PIrm  : MMXI<0x2D, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                           "cvtps2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTTPD2PIrr : MMX2I<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                            "cvttpd2pi\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTTPD2PIrm : MMX2I<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f128mem:$src),
                            "cvttpd2pi\t{$src, $dst|$dst, $src}", []>;

def MMX_CVTTPS2PIrr : MMXI<0x2C, MRMSrcReg, (outs VR64:$dst), (ins VR128:$src),
                           "cvttps2pi\t{$src, $dst|$dst, $src}", []>;
let mayLoad = 1 in
def MMX_CVTTPS2PIrm : MMXI<0x2C, MRMSrcMem, (outs VR64:$dst), (ins f64mem:$src),
                           "cvttps2pi\t{$src, $dst|$dst, $src}", []>;
} // end neverHasSideEffects

// Extract / Insert
def MMX_X86pextrw : SDNode<"X86ISD::PEXTRW", SDTypeProfile<1, 2, []>, []>;
def MMX_X86pinsrw : SDNode<"X86ISD::PINSRW", SDTypeProfile<1, 3, []>, []>;

def MMX_PEXTRWri  : MMXIi8<0xC5, MRMSrcReg,
                           (outs GR32:$dst), (ins VR64:$src1, i16i8imm:$src2),
                           "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                           [(set GR32:$dst, (MMX_X86pextrw (v4i16 VR64:$src1),
                                             (iPTR imm:$src2)))]>;
let isTwoAddress = 1 in {
  def MMX_PINSRWrri : MMXIi8<0xC4, MRMSrcReg,
                       (outs VR64:$dst),
                       (ins VR64:$src1, GR32:$src2, i16i8imm:$src3),
                       "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                       [(set VR64:$dst, (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1),
                                                GR32:$src2, (iPTR imm:$src3))))]>;
  def MMX_PINSRWrmi : MMXIi8<0xC4, MRMSrcMem,
                       (outs VR64:$dst),
                       (ins VR64:$src1, i16mem:$src2, i16i8imm:$src3),
                       "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                       [(set VR64:$dst,
                         (v4i16 (MMX_X86pinsrw (v4i16 VR64:$src1),
                                 (i32 (anyext (loadi16 addr:$src2))),
                                 (iPTR imm:$src3))))]>;
}

// Mask creation
def MMX_PMOVMSKBrr : MMXI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR64:$src),
                          "pmovmskb\t{$src, $dst|$dst, $src}",
                          [(set GR32:$dst, (int_x86_mmx_pmovmskb VR64:$src))]>;

// Misc.
let Uses = [EDI] in
def MMX_MASKMOVQ : MMXI<0xF7, MRMDestMem, (outs), (ins VR64:$src, VR64:$mask),
                        "maskmovq\t{$mask, $src|$src, $mask}",
                        [(int_x86_mmx_maskmovq VR64:$src, VR64:$mask, EDI)]>;
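
// MASKMOVQ stores only those bytes of $src whose corresponding byte in $mask
// has its most significant bit set; the store address is implicitly (E)DI,
// which is why EDI appears in Uses rather than as an explicit operand.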

//===----------------------------------------------------------------------===//
// Alias Instructions
//===----------------------------------------------------------------------===//

// Alias instructions that map zero vector to pxor.
let isReMaterializable = 1 in {
  def MMX_V_SET0       : MMXI<0xEF, MRMInitReg, (outs VR64:$dst), (ins),
                              "pxor\t$dst, $dst",
                              [(set VR64:$dst, (v2i32 immAllZerosV))]>;
  def MMX_V_SETALLONES : MMXI<0x76, MRMInitReg, (outs VR64:$dst), (ins),
                              "pcmpeqd\t$dst, $dst",
                              [(set VR64:$dst, (v2i32 immAllOnesV))]>;
}
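// "pxor mm, mm" and "pcmpeqd mm, mm" are the usual idioms for materializing
// all-zeros and all-ones constants without a load; MRMInitReg encodes the
// destination register in both ModRM fields, so the instruction reads and
// writes the same register.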

let Predicates = [HasMMX] in {
  def : Pat<(v1i64 immAllZerosV), (MMX_V_SET0)>;
  def : Pat<(v4i16 immAllZerosV), (MMX_V_SET0)>;
  def : Pat<(v8i8  immAllZerosV), (MMX_V_SET0)>;
}

//===----------------------------------------------------------------------===//
// Non-Instruction Patterns
//===----------------------------------------------------------------------===//

// Store 64-bit integer vector values.
def : Pat<(store (v8i8  VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v4i16 VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v2i32 VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;
def : Pat<(store (v1i64 VR64:$src), addr:$dst),
          (MMX_MOVQ64mr addr:$dst, VR64:$src)>;

// Bit convert.
def : Pat<(v8i8  (bitconvert (v1i64 VR64:$src))), (v8i8  VR64:$src)>;
def : Pat<(v8i8  (bitconvert (v2i32 VR64:$src))), (v8i8  VR64:$src)>;
def : Pat<(v8i8  (bitconvert (v4i16 VR64:$src))), (v8i8  VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  VR64:$src))), (v4i16 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  VR64:$src))), (v2i32 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2i32 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 VR64:$src))), (v1i64 VR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  VR64:$src))), (v1i64 VR64:$src)>;
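// All four 64-bit vector types live in the same MMX register class, so these
// bitconvert patterns select to nothing: the value simply stays in the same
// register under a new type.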

// 64-bit bit convert.
def : Pat<(v1i64 (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v2i32 (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v4i16 (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(v8i8  (bitconvert (i64 GR64:$src))),
          (MMX_MOVD64to64rr GR64:$src)>;
def : Pat<(i64 (bitconvert (v1i64 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v2i32 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v4i16 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;
def : Pat<(i64 (bitconvert (v8i8 VR64:$src))),
          (MMX_MOVD64from64rr VR64:$src)>;

// Move scalar to an MMX register zero-extended:
// movd to an MMX register zeros the upper elements.
let AddedComplexity = 15 in {
  def : Pat<(v8i8  (X86vzmovl (bc_v8i8  (v2i32 (scalar_to_vector GR32:$src))))),
            (MMX_MOVZDI2PDIrr GR32:$src)>;
  def : Pat<(v4i16 (X86vzmovl (bc_v4i16 (v2i32 (scalar_to_vector GR32:$src))))),
            (MMX_MOVZDI2PDIrr GR32:$src)>;
}

// Scalar to v4i16 / v8i8.  The source may be a GR32, but only the lower
// 8 or 16 bits matter.
def : Pat<(bc_v8i8  (v2i32 (scalar_to_vector GR32:$src))),
          (MMX_MOVD64rr GR32:$src)>;
def : Pat<(bc_v4i16 (v2i32 (scalar_to_vector GR32:$src))),
          (MMX_MOVD64rr GR32:$src)>;

// Patterns to perform canonical versions of vector shuffling.
let AddedComplexity = 10 in {
  def : Pat<(v8i8  (vector_shuffle VR64:$src, (undef),
                    MMX_UNPCKL_v_undef_shuffle_mask)),
            (MMX_PUNPCKLBWrr VR64:$src, VR64:$src)>;
  def : Pat<(v4i16 (vector_shuffle VR64:$src, (undef),
                    MMX_UNPCKL_v_undef_shuffle_mask)),
            (MMX_PUNPCKLWDrr VR64:$src, VR64:$src)>;
  def : Pat<(v2i32 (vector_shuffle VR64:$src, (undef),
                    MMX_UNPCKL_v_undef_shuffle_mask)),
            (MMX_PUNPCKLDQrr VR64:$src, VR64:$src)>;
}

let AddedComplexity = 10 in {
  def : Pat<(v8i8  (vector_shuffle VR64:$src, (undef),
                    MMX_UNPCKH_v_undef_shuffle_mask)),
            (MMX_PUNPCKHBWrr VR64:$src, VR64:$src)>;
  def : Pat<(v4i16 (vector_shuffle VR64:$src, (undef),
                    MMX_UNPCKH_v_undef_shuffle_mask)),
            (MMX_PUNPCKHWDrr VR64:$src, VR64:$src)>;
  def : Pat<(v2i32 (vector_shuffle VR64:$src, (undef),
                    MMX_UNPCKH_v_undef_shuffle_mask)),
            (MMX_PUNPCKHDQrr VR64:$src, VR64:$src)>;
}

// Patterns to perform vector shuffling with a zeroed out vector.
let AddedComplexity = 20 in {
  def : Pat<(bc_v2i32 (vector_shuffle immAllZerosV,
                       (v2i32 (scalar_to_vector (load_mmx addr:$src))),
                       MMX_UNPCKL_shuffle_mask)),
            (MMX_PUNPCKLDQrm VR64:$src, VR64:$src)>;
}

// Some special case PANDN patterns.
// FIXME: Get rid of these.
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
                      VR64:$src2)),
          (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
                      VR64:$src2)),
          (MMX_PANDNrr VR64:$src1, VR64:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8  immAllOnesV_bc))),
                      VR64:$src2)),
          (MMX_PANDNrr VR64:$src1, VR64:$src2)>;

def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v2i32 immAllOnesV))),
                      (load addr:$src2))),
          (MMX_PANDNrm VR64:$src1, addr:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
                      (load addr:$src2))),
          (MMX_PANDNrm VR64:$src1, addr:$src2)>;
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8  immAllOnesV_bc))),
                      (load addr:$src2))),
          (MMX_PANDNrm VR64:$src1, addr:$src2)>;
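
// These exist because the all-ones operand of the xor may have been built with
// a different element type (v2i32, v4i16 or v8i8) and bitconverted to v1i64,
// in which case the plain (vnot ...) form matched by MMX_PANDNrr/rm would not
// fire.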

// Move an MMX value into the low 64 bits of an XMM register.
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert VR64:$src)))),
          (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;

// Move the low 64 bits of an XMM register into an MMX register.
def : Pat<(v2i32 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
                                                  (iPTR 0))))),
          (v2i32 (MMX_MOVDQ2Qrr VR128:$src))>;
def : Pat<(v4i16 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
                                                  (iPTR 0))))),
          (v4i16 (MMX_MOVDQ2Qrr VR128:$src))>;
def : Pat<(v8i8  (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
                                                  (iPTR 0))))),
          (v8i8  (MMX_MOVDQ2Qrr VR128:$src))>;