Support x86's PALIGNR instruction without using a palignr intrinsic.


git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@91264 91177308-0d34-0410-b5e6-96231b3b80d8
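
Background: PALIGNR concatenates its two input vectors and extracts a
byte-aligned window. Rather than calling the SSSE3 intrinsic, the builtin is
now lowered to a generic vector shuffle, a psrldq, or a zero constant,
depending on the shift amount. As a scalar sketch of the 128-bit semantics
being lowered (palignr_model is an illustrative name, not part of the patch):

  #include <stdint.h>
  #include <string.h>

  /* Model of _mm_alignr_epi8(a, b, n): the 16 bytes starting at byte
   * offset n of the 32-byte concatenation with b in the low half and a
   * in the high half; offsets past the end read as zero. */
  static void palignr_model(uint8_t dst[16], const uint8_t a[16],
                            const uint8_t b[16], unsigned n) {
    uint8_t buf[48] = {0};   /* zero tail covers n in [17, 31] */
    memcpy(buf, b, 16);      /* low half */
    memcpy(buf + 16, a, 16); /* high half */
    memcpy(dst, buf + (n < 32 ? n : 32), 16);
  }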
diff --git a/include/clang/Basic/BuiltinsX86.def b/include/clang/Basic/BuiltinsX86.def
index bbf42ee..adb1b76 100644
--- a/include/clang/Basic/BuiltinsX86.def
+++ b/include/clang/Basic/BuiltinsX86.def
@@ -250,8 +250,8 @@
 BUILTIN(__builtin_ia32_monitor, "vv*UiUi", "")
 BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
 BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
-BUILTIN(__builtin_ia32_palignr128, "V2LLiV2LLiV2LLic", "")
-BUILTIN(__builtin_ia32_palignr, "V1LLiV1LLiV1LLic", "")
+BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cc", "")
+BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cc", "")
 BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
 
 BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
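
In Clang's builtin prototype strings, the leading type is the return type,
"V16c" is a vector of 16 chars, and "LLi" is long long. The builtins thus move
from v2i64/v1i64 operands to the byte vectors palignr actually operates on,
with the shift count passed as a char.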
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 02bf97c..5d5caa2 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -807,10 +807,38 @@
   }
   case X86::BI__builtin_ia32_palignr128:
   case X86::BI__builtin_ia32_palignr: {
-    Function *F = CGM.getIntrinsic(BuiltinID == X86::BI__builtin_ia32_palignr128 ?
-				   Intrinsic::x86_ssse3_palign_r_128 :
-				   Intrinsic::x86_ssse3_palign_r);
-    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+    
+    // If palignr is shifting the pair of input vectors by 16 bytes or less,
+    // emit a shuffle instruction.
+    if (shiftVal <= 16) {
+      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+      llvm::SmallVector<llvm::Constant*, 16> Indices;
+      for (unsigned i = 0; i != 16; ++i)
+        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+      
+      Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+    }
+    
+    // If palignr is shifting the pair of input vectors by more than 16 but
+    // less than 32 bytes, emit a logical right shift of the destination.
+    if (shiftVal < 32) {
+      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+      
+      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+      Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal-16) * 8);
+      
+      // The psrl.dq intrinsic takes its shift amount in bits.
+      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
+      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+    }
+    
+    // If palignr is shifting the pair of input vectors 32 or more bytes, emit zero.
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
   }
   }
 }
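
In the shuffle emitted above, indices 0-15 select bytes of the first shuffle
operand (Ops[1], the builtin's second argument) and 16-31 select bytes of the
second (Ops[0]), which is why the operands are passed to CreateShuffleVector
in reversed order. A scalar sketch of the three cases (lower_palignr is an
illustrative name, not part of the patch):

  #include <stdint.h>

  static void lower_palignr(uint8_t r[16], const uint8_t a[16],
                            const uint8_t b[16], unsigned shiftVal) {
    if (shiftVal <= 16) {
      /* Shuffle case: byte i of the result is byte (shiftVal + i) of
       * the concatenation b (low) : a (high). */
      for (unsigned i = 0; i != 16; ++i)
        r[i] = shiftVal + i < 16 ? b[shiftVal + i] : a[shiftVal + i - 16];
    } else if (shiftVal < 32) {
      /* psrldq case: logical right shift of a by (shiftVal - 16) bytes. */
      for (unsigned i = 0; i != 16; ++i)
        r[i] = shiftVal - 16 + i < 16 ? a[shiftVal - 16 + i] : 0;
    } else {
      /* Everything has been shifted out: the result is zero. */
      for (unsigned i = 0; i != 16; ++i)
        r[i] = 0;
    }
  }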
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index 00bfa27..374a27e 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -66,8 +66,8 @@
     return (__m128i)__builtin_ia32_pabsd128((__v4si)a);
 }
 
-#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n*8)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
+#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
 
 static inline __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_hadd_epi16(__m128i a, __m128i b)
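
The macros no longer scale the count by 8: the new builtins take the shift
amount in bytes, while the old palignr intrinsics took it in bits (as the
psrl.dq call in CGBuiltin.cpp still does). A minimal usage sketch, assuming
SSSE3 is enabled:

  #include <tmmintrin.h>

  /* The immediate is forwarded as a byte count; this selects bytes
   * 4..19 of the concatenation b (low) : a (high). */
  static __m128i shift_in_four(__m128i a, __m128i b) {
    return _mm_alignr_epi8(a, b, 4);
  }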
diff --git a/test/CodeGen/palignr.c b/test/CodeGen/palignr.c
new file mode 100644
index 0000000..c0c7e77
--- /dev/null
+++ b/test/CodeGen/palignr.c
@@ -0,0 +1,15 @@
+// RUN: clang-cc %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -o - | FileCheck %s
+
+#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
+typedef __attribute__((vector_size(16))) int int4;
+
+// CHECK: palignr
+int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
+// CHECK: ret
+// CHECK: ret
+// CHECK-NOT: palignr
+int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); }
+// CHECK: psrldq
+int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
+// CHECK: xorps
+int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
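
A note on the expectations: n == 16 selects all of a, so the shuffle folds to
a plain move and the CHECK-NOT verifies no palignr is emitted for align2;
n == 17 lowers to a single psrldq; and n == 32 yields a zero vector, which
codegen materializes with xorps.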