Rewrite the handling of the 64-bit palignr intrinsics as vector shuffles.
Accordingly, stop multiplying the shift constant by 8 in the header, and change
the intrinsic definition to reflect the types we expect.
Extend the existing palignr test to check that we emit the correct instructions.
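
For reference, a minimal sketch (not Clang's implementation; the helper name
palignr64_ref is made up for illustration) of what the 64-bit palignr computes.
It shows why the byte shift count alone determines the codegen the test checks
for below: a real palignr shuffle for n < 8, just the first operand for n == 8,
a plain right shift (psrlq) for 8 < n < 16, and zero (xor) for n >= 16.

  #include <stdint.h>

  /* Hypothetical reference helper, using the __int128 extension: the 64-bit
   * palignr views a:b as a 128-bit value, shifts it right by n *bytes*, and
   * keeps the low 64 bits. */
  static uint64_t palignr64_ref(uint64_t a, uint64_t b, unsigned n) {
      if (n >= 16)
          return 0;                        /* everything shifted out -> xor   */
      unsigned __int128 cat = ((unsigned __int128)a << 64) | b;
      return (uint64_t)(cat >> (8u * n));  /* n == 8: exactly a (no palignr);
                                              8 < n < 16: a shifted (psrlq);
                                              n < 8: a genuine byte shuffle
                                              of b and a (palignr)            */
  }
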
llvm-svn: 101332
diff --git a/clang/test/CodeGen/palignr.c b/clang/test/CodeGen/palignr.c
index 627e309..6297b2e 100644
--- a/clang/test/CodeGen/palignr.c
+++ b/clang/test/CodeGen/palignr.c
@@ -1,13 +1,9 @@
 // RUN: %clang_cc1 %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -o - | FileCheck %s
 
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
-typedef __attribute__((vector_size(8))) int int2;
 typedef __attribute__((vector_size(16))) int int4;
 
 // CHECK: palignr
-int2 mmx_align1(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
-// CHECK: palignr
 int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
 // CHECK: ret
 // CHECK: ret
@@ -17,3 +13,18 @@
 int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
 // CHECK: xor
 int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
+
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+typedef __attribute__((vector_size(8))) int int2;
+
+// CHECK-NOT: palignr
+int2 align5(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 8); }
+
+// CHECK: psrlq
+int2 align6(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 9); }
+
+// CHECK: xor
+int2 align7(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 16); }
+
+// CHECK: palignr
+int2 align8(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
\ No newline at end of file