//======- X86InstrFragmentsSIMD.td - x86 ISA -------------*- tablegen -*-=====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (v1i64 (load node:$ptr))>;

def bc_v8i8  : PatFrag<(ops node:$in), (v8i8  (bitconvert node:$in))>;
def bc_v4i16 : PatFrag<(ops node:$in), (v4i16 (bitconvert node:$in))>;
def bc_v2i32 : PatFrag<(ops node:$in), (v2i32 (bitconvert node:$in))>;
def bc_v1i64 : PatFrag<(ops node:$in), (v1i64 (bitconvert node:$in))>;
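
// The bc_* fragments match a bitconvert (bitcast) whose result has the named
// MMX vector type. They let instruction patterns accept any 64-bit MMX value
// regardless of how it is currently typed in the DAG.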

//===----------------------------------------------------------------------===//
// MMX Masks
//===----------------------------------------------------------------------===//

// MMX_SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to
// PSHUFW imm.
def MMX_SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <2, 6, 3, 7, ...>
def mmx_unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, v2, <0, 4, 2, 5, ...>
def mmx_unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
def mmx_unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

// Patterns for: vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
def mmx_unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                               (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def mmx_pshufw : PatFrag<(ops node:$lhs, node:$rhs),
                         (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], MMX_SHUFFLE_get_shuf_imm>;
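
// MMX_SHUFFLE_get_shuf_imm is attached to mmx_pshufw as its operand
// transform: when an instruction pattern binds a name to this fragment, the
// matched shuffle is converted into the 8-bit PSHUFW immediate and that value
// is what the bound name refers to in the output pattern (see the note at the
// end of this file for an example of the binding syntax).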

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad]>;
def X86vshl    : SDNode<"X86ISD::VSHL",      SDTIntShiftOp>;
def X86vshr    : SDNode<"X86ISD::VSRL",      SDTIntShiftOp>;
def X86cmpps   : SDNode<"X86ISD::CMPPS",     SDTX86VFCMP>;
def X86cmppd   : SDNode<"X86ISD::CMPPD",     SDTX86VFCMP>;
def X86pcmpeqb : SDNode<"X86ISD::PCMPEQB", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqw : SDNode<"X86ISD::PCMPEQW", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqd : SDNode<"X86ISD::PCMPEQD", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpeqq : SDNode<"X86ISD::PCMPEQQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgtb : SDNode<"X86ISD::PCMPGTB", SDTIntBinOp>;
def X86pcmpgtw : SDNode<"X86ISD::PCMPGTW", SDTIntBinOp>;
def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>;
def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp   : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
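
// These X86ISD::* nodes are created during DAG lowering (X86ISelLowering.cpp)
// for x86-specific operations that have no generic ISD equivalent; defining
// them here makes them visible to the instruction selection patterns.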

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
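
// A note on the ComplexPatterns above: the '5' is the number of operands the
// matcher returns, corresponding to the usual x86 address parts (base, scale,
// index, displacement, segment) that SelectScalarSSELoad fills in when it
// recognizes a foldable scalar load.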

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
}
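
// ssmem/sdmem are the memory operand definitions used by those scalar 'ss'
// and 'sd' forms; PrintMethod and ParserMatchClass hook them up to the
// assembly printer and parser, while MIOperandInfo carries the same
// five-part address tuple.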

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
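
// The aligned fragments exist so that patterns only fold a memory operand
// into an instruction form that requires 16-byte alignment (e.g. the
// MOVAPS/MOVDQA style loads and stores) when the access is known to be
// sufficiently aligned; otherwise the unaligned MOVUPS/MOVDQU-style forms
// must be used.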

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv4i32 : PatFrag<(ops node:$ptr),
                               (v4i32 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload node:$ptr))>;
def alignedloadv8i32 : PatFrag<(ops node:$ptr),
                               (v8i32 (alignedload node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
    || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
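
// Illustrative only (the real definitions live in X86InstrSSE.td and are
// mostly generated through multiclasses): a typical use of a memop fragment
// is folding a load into the memory form of an arithmetic instruction, e.g.
//   def : Pat<(v4f32 (fadd VR128:$src1, (memopv4f32 addr:$src2))),
//             (ADDPSrm VR128:$src1, addr:$src2)>;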

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>;
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;
def memopv8i32 : PatFrag<(ops node:$ptr), (v8i32 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions, and those memory operands
// do not have to be aligned on a 16-byte boundary.
// FIXME: 8-byte alignment for MMX reads is not actually required either.
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;
def memopv4i16 : PatFrag<(ops node:$ptr), (v4i16 (memop64 node:$ptr))>;
def memopv8i16 : PatFrag<(ops node:$ptr), (v8i16 (memop64 node:$ptr))>;
def memopv2i32 : PatFrag<(ops node:$ptr), (v2i32 (memop64 node:$ptr))>;

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
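
// The aligned fragment is what selects the non-temporal store instructions
// (MOVNTPS, MOVNTDQ and friends); the unaligned case is handled separately
// (typically by falling back to an ordinary unaligned store), since the SSE
// non-temporal vector stores require 16-byte alignment.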

def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// FIXME: move this to a more appropriate place after all AVX is done.
def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;
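
// vzmovl_* and vzload_* capture the common "load a scalar into the low lane
// and zero the upper lanes" idiom (what MOVD/MOVQ do when loading from
// memory), wrapped in a bitconvert so patterns of any vector type can use it.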

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm  : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
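
// BYTE_imm compensates for instructions such as PSLLDQ/PSRLDQ, which shift by
// whole bytes while the corresponding intrinsics express the amount in bits;
// dividing the immediate by 8 (imm >> 3) converts between the two.
// Illustrative only (hypothetical pattern shape):
//   def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
//             (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>;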

// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShuffleSHUFImmediate(N));
}]>;

// SHUFFLE_get_pshufhw_imm xform function: convert vector_shuffle mask to
// PSHUFHW imm.
def SHUFFLE_get_pshufhw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFHWImmediate(N));
}]>;

// SHUFFLE_get_pshuflw_imm xform function: convert vector_shuffle mask to
// PSHUFLW imm.
def SHUFFLE_get_pshuflw_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePSHUFLWImmediate(N));
}]>;

// SHUFFLE_get_palign_imm xform function: convert vector_shuffle mask to
// a PALIGNR imm.
def SHUFFLE_get_palign_imm : SDNodeXForm<vector_shuffle, [{
  return getI8Imm(X86::getShufflePALIGNRImmediate(N));
}]>;

def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
}]>;

def movddup : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVDDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movhlps_undef : PatFrag<(ops node:$lhs, node:$rhs),
                            (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVHLPS_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlhps : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLHPSMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movlp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movl : PatFrag<(ops node:$lhs, node:$rhs),
                   (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movshdup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSHDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def movsldup : PatFrag<(ops node:$lhs, node:$rhs),
                       (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isMOVSLDUPMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKLMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKHMask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckl_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKL_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def unpckh_undef : PatFrag<(ops node:$lhs, node:$rhs),
                           (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isUNPCKH_v_undef_Mask(cast<ShuffleVectorSDNode>(N));
}]>;

def pshufd : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFDMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def shufp : PatFrag<(ops node:$lhs, node:$rhs),
                    (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isSHUFPMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_shuf_imm>;

def pshufhw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFHWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshufhw_imm>;

def pshuflw : PatFrag<(ops node:$lhs, node:$rhs),
                      (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPSHUFLWMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_pshuflw_imm>;

def palign : PatFrag<(ops node:$lhs, node:$rhs),
                     (vector_shuffle node:$lhs, node:$rhs), [{
  return X86::isPALIGNRMask(cast<ShuffleVectorSDNode>(N));
}], SHUFFLE_get_palign_imm>;
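
// Illustrative only: the fragments that carry an operand transform (pshufd,
// shufp, pshufhw, pshuflw, palign) are used with a name binding so that the
// transformed shuffle mask becomes the instruction's immediate operand, e.g.
//   [(set VR128:$dst, (v4i32 (pshufd:$src2 VR128:$src1, (undef))))]
// where $src2 receives the value computed by SHUFFLE_get_shuf_imm.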