//======- X86InstrFragmentsSIMD.td - x86 ISA -------------*- tablegen -*-=====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;

def bc_v8i8  : PatFrag<(ops node:$in), (v8i8  (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;
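
// For illustration only: a hedged sketch of how these fragments are meant
// to be consumed (the actual users live in X86InstrMMX.td). A bitconvert
// fragment reinterprets an MMX load at another element width, e.g.:
//   (bc_v8i8 (load_mmx addr:$src))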

//===----------------------------------------------------------------------===//
// MMX Masks
//===----------------------------------------------------------------------===//

// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUFW imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <0, 4, 1, 5, ...>
def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for PSHUFW; reuses the PSHUFD mask predicate, since both select
// a 4-element shuffle.
def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
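
// For illustration only: because mmx_pshufw carries the
// MMX_SHUFFLE_get_shuf_imm transform, naming the fragment in a source
// pattern feeds the converted mask into the immediate operand. A hedged
// sketch of the X86InstrMMX.td usage (instruction name assumed):
//   def MMX_PSHUFWri : ... (ins VR64:$src1, i8imm:$src2), ...,
//     [(set VR64:$dst, (v4i16 (mmx_pshufw:$src2 VR64:$src1, (undef))))]>;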

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;

def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>;
def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>;
def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp,
                    [SDNPCommutative, SDNPAssociative]>;
def X86fxor : SDNode<"X86ISD::FXOR", SDTFPBinOp,
                     [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt : SDNode<"X86ISD::FRSQRT", SDTFPUnaryOp>;
def X86frcp : SDNode<"X86ISD::FRCP", SDTFPUnaryOp>;
def X86fsrl : SDNode<"X86ISD::FSRL", SDTX86FPShiftOp>;
def X86comi : SDNode<"X86ISD::COMI", SDTX86CmpTest>;
def X86ucomi : SDNode<"X86ISD::UCOMI", SDTX86CmpTest>;
def X86pshufb : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                       [SDNPHasChain, SDNPMayLoad]>;
def X86vshl : SDNode<"X86ISD::VSHL", SDTIntShiftOp>;
def X86vshr : SDNode<"X86ISD::VSRL", SDTIntShiftOp>;
def X86cmpps : SDNode<"X86ISD::CMPPS", SDTX86VFCMP>;
def X86cmppd : SDNode<"X86ISD::CMPPD", SDTX86VFCMP>;
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVT<1, v4f32>,
                                          SDTCisVT<2, v4f32>]>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
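
// For illustration only: a hedged sketch of how X86InstrSSE.td consumes
// these complex patterns in intrinsic 'ss' source patterns, with the
// low-element load folded into the instruction's memory operand:
//   (int_x86_sse_add_ss VR128:$src1, sse_load_f32:$src2)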

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
                               (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;
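
// For illustration only: an aligned-load fragment typically appears in an
// aligned-move pattern, roughly as in this sketch (MOVAPSrm itself is
// defined in X86InstrSSE.td):
//   [(set VR128:$dst, (alignedloadv4f32 addr:$src))]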

// FIXME: move this to a more appropriate place after all AVX is done.
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload node:$ptr))>;
def alignedloadv8i32 : PatFrag<(ops node:$ptr),
                               (v8i32 (alignedload node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be 16-byte aligned on some targets but not on others. If the subtarget
// allows unaligned vector accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// AMD Family 10h Opterons and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem() ||
         cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
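
// For illustration only: memop lets an arithmetic pattern fold a vector
// load (unaligned only when the subtarget permits it) into the memory
// operand, roughly as in this sketch of the X86InstrSSE.td usage, with
// memopv4f32 as defined just below:
//   [(set VR128:$dst, (fadd VR128:$src1, (memopv4f32 addr:$src2)))]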

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>;
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
def memopv8i32 : PatFrag<(ops node:$ptr), (v8i32 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions, and MMX memory operands
// are not aligned on a 16-byte boundary.
// FIXME: 8-byte alignment for MMX reads is not required.
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
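
// For illustration only: the non-temporal move instructions select on
// these fragments, roughly as in this sketch of the X86InstrSSE.td usage
// for MOVNTPS:
//   [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]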

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
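
// For illustration only: PSLLDQ/PSRLDQ shift by bytes while the
// corresponding intrinsics take a bit count, so patterns wrap the
// immediate in BYTE_imm, roughly as in this sketch (instruction and
// intrinsic names as used by X86InstrSSE.td):
//   def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
//             (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))>;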

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;

def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;

def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;
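
// For illustration only: pshufd's attached SHUFFLE_get_shuf_imm transform
// converts the matched shuffle mask into the instruction immediate, so a
// source pattern can be written roughly as in this sketch of the
// X86InstrSSE.td usage:
//   [(set VR128:$dst, (v4i32 (pshufd:$src2 VR128:$src1, (undef))))]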

def shufp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;

def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;

def palign : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;