Revert MMX palignr to use an intrinsic, since MMX shuffle patterns are missing.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@91269 91177308-0d34-0410-b5e6-96231b3b80d8
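For context, a minimal user-level sketch (not part of this patch) of how the MMX builtin is reached; the helper name and shift amount are invented for illustration, and it assumes clang's <tmmintrin.h> defines _mm_alignr_pi8 in terms of __builtin_ia32_palignr:

    /* Sketch only: compile with -mssse3. */
    #include <tmmintrin.h>

    __m64 concat_shift(__m64 hi, __m64 lo) {
      /* After this change, clang lowers the builtin behind _mm_alignr_pi8 to a
       * call to the llvm.x86.ssse3.palign.r intrinsic instead of expanding it
       * to a shufflevector, since the MMX shuffle selection patterns are
       * missing in the backend. */
      return _mm_alignr_pi8(hi, lo, 3);
    }

The SSE variant, __builtin_ia32_palignr128, keeps the shuffle expansion shown in the hunk below.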
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 5d5caa2..c704432 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -805,8 +805,11 @@
     Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
-  case X86::BI__builtin_ia32_palignr128:
   case X86::BI__builtin_ia32_palignr: {
+    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+  }
+  case X86::BI__builtin_ia32_palignr128: {
     unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
 
     // If palignr is shifting the pair of input vectors less than 17 bytes,