Revert r129401 for now. Clang still emits the x86 unaligned-load intrinsics (llvm.x86.sse.loadu.ps, llvm.x86.sse2.loadu.pd, llvm.x86.sse2.loadu.dq), i.e. the old way of doing things, so restore their instruction patterns and optimizer support instead of auto-upgrading the calls to native unaligned loads.
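
As a sketch of that "old way" (hypothetical user code, not part of this
commit): Clang of this vintage lowers the unaligned-load builtins to calls
to these intrinsics rather than to plain align-1 IR loads, which is why
the intrinsic instruction definitions below have to come back.

    /* Hypothetical example; assumes a Clang that still lowers this
       builtin to llvm.x86.sse.loadu.ps. */
    #include <xmmintrin.h>

    __m128 load4(const float *p) {
      /* Emitted as a call to llvm.x86.sse.loadu.ps, which instruction
         selection can now match to MOVUPSrm_Int (VMOVUPSrm_Int on AVX). */
      return _mm_loadu_ps(p);
    }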

llvm-svn: 129403
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 85ab916..21df57c 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -916,6 +916,7 @@
     case X86::MOVSDrm:
     case X86::MOVAPSrm:
     case X86::MOVUPSrm:
+    case X86::MOVUPSrm_Int:
     case X86::MOVAPDrm:
     case X86::MOVDQArm:
     case X86::MMX_MOVD64rm:
@@ -2844,9 +2845,11 @@
   case X86::FsMOVAPDrm:
   case X86::MOVAPSrm:
   case X86::MOVUPSrm:
+  case X86::MOVUPSrm_Int:
   case X86::MOVAPDrm:
   case X86::MOVDQArm:
   case X86::MOVDQUrm:
+  case X86::MOVDQUrm_Int:
     break;
   }
   switch (Opc2) {
@@ -2866,9 +2869,11 @@
   case X86::FsMOVAPDrm:
   case X86::MOVAPSrm:
   case X86::MOVUPSrm:
+  case X86::MOVUPSrm_Int:
   case X86::MOVAPDrm:
   case X86::MOVDQArm:
   case X86::MOVDQUrm:
+  case X86::MOVDQUrm_Int:
     break;
   }
 
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index c36e975..8f08e68 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -329,6 +329,15 @@
 
 // Intrinsic forms of MOVUPS/D load and store
 let isAsmParserOnly = 0 in {
+  let canFoldAsLoad = 1, isReMaterializable = 1 in
+  def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
+             (ins f128mem:$src),
+             "movups\t{$src, $dst|$dst, $src}",
+             [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
+  def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
+             (ins f128mem:$src),
+             "movupd\t{$src, $dst|$dst, $src}",
+             [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
   def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
              (ins f128mem:$dst, VR128:$src),
              "movups\t{$src, $dst|$dst, $src}",
@@ -338,6 +347,13 @@
              "movupd\t{$src, $dst|$dst, $src}",
              [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
 }
+let canFoldAsLoad = 1, isReMaterializable = 1 in
+def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+                       "movups\t{$src, $dst|$dst, $src}",
+                       [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
+def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
+                       "movupd\t{$src, $dst|$dst, $src}",
+                       [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
 
 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                        "movups\t{$src, $dst|$dst, $src}",
@@ -2213,12 +2229,22 @@
 
 // Intrinsic forms of MOVDQU load and store
 let isAsmParserOnly = 0 in {
+let canFoldAsLoad = 1 in
+def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+                       "vmovdqu\t{$src, $dst|$dst, $src}",
+                       [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
+                     XS, VEX, Requires<[HasAVX]>;
 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                        "vmovdqu\t{$src, $dst|$dst, $src}",
                        [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                      XS, VEX, Requires<[HasAVX]>;
 }
 
+let canFoldAsLoad = 1 in
+def MOVDQUrm_Int :   I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+                       "movdqu\t{$src, $dst|$dst, $src}",
+                       [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
+                 XS, Requires<[HasSSE2]>;
 def MOVDQUmr_Int :   I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                        "movdqu\t{$src, $dst|$dst, $src}",
                        [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 726105f..875e9ca 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -537,7 +537,11 @@
     break;
   case Intrinsic::ppc_altivec_lvx:
   case Intrinsic::ppc_altivec_lvxl:
-    // Turn PPC lvx -> load if the pointer is known aligned.
+  case Intrinsic::x86_sse_loadu_ps:
+  case Intrinsic::x86_sse2_loadu_pd:
+  case Intrinsic::x86_sse2_loadu_dq:
+    // Turn PPC lvx     -> load if the pointer is known aligned.
+    // Turn X86 loadups -> load if the pointer is known aligned.
     if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
       Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
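
A hedged sketch of the rule restored above (hypothetical source, not from
this commit): when the pointer is provably 16-byte aligned, the intrinsic
call can be replaced with an ordinary vector load.

    #include <xmmintrin.h>

    /* buf is known 16-byte aligned, so instcombine may rewrite the
       llvm.x86.sse.loadu.ps call into a plain load. */
    __attribute__((aligned(16))) static float buf[4];

    __m128 load_buf(void) {
      return _mm_loadu_ps(buf);
    }
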
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index f6e2c88..87e78fa 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -572,6 +572,9 @@
     switch (II->getIntrinsicID()) {
       default: break;
       case Intrinsic::prefetch:
+      case Intrinsic::x86_sse2_loadu_dq:
+      case Intrinsic::x86_sse2_loadu_pd:
+      case Intrinsic::x86_sse_loadu_ps:
       case Intrinsic::x86_sse_storeu_ps:
       case Intrinsic::x86_sse2_storeu_pd:
       case Intrinsic::x86_sse2_storeu_dq:
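
The loadu entries mirror the storeu entries already listed: the pointer
argument of each intrinsic is an address use, so LSR can fold reduced
address arithmetic into loops like this hypothetical one (not from this
commit):

    #include <xmmintrin.h>

    float sum4(const float *p, int n) {
      __m128 acc = _mm_setzero_ps();
      for (int i = 0; i + 4 <= n; i += 4)
        acc = _mm_add_ps(acc, _mm_loadu_ps(p + i)); /* address use */
      float out[4];
      _mm_storeu_ps(out, acc);
      return out[0] + out[1] + out[2] + out[3];
    }
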
diff --git a/llvm/lib/VMCore/AutoUpgrade.cpp b/llvm/lib/VMCore/AutoUpgrade.cpp
index 4541f38..9e551bb 100644
--- a/llvm/lib/VMCore/AutoUpgrade.cpp
+++ b/llvm/lib/VMCore/AutoUpgrade.cpp
@@ -527,12 +527,6 @@
       // or 0.
       NewFn = 0;
       return true;           
-    } else if (Name.compare(5, 16, "x86.sse.loadu.ps", 16) == 0 ||
-               Name.compare(5, 17, "x86.sse2.loadu.dq", 17) == 0 ||
-               Name.compare(5, 17, "x86.sse2.loadu.pd", 17) == 0) {
-      // Calls to these instructions are transformed into unaligned loads.
-      NewFn = 0;
-      return true;
     } else if (Name.compare(5, 17, "x86.ssse3.pshuf.w", 17) == 0) {
       // This is an SSE/MMX instruction.
       const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
@@ -952,29 +946,7 @@
         
       // Remove upgraded instruction.
       CI->eraseFromParent();
-    
-    } else if (F->getName() == "llvm.x86.sse.loadu.ps" ||
-               F->getName() == "llvm.x86.sse2.loadu.dq" ||
-               F->getName() == "llvm.x86.sse2.loadu.pd") {
-      // Convert to a native, unaligned load.
-      const Type *VecTy = CI->getType();
-      const Type *IntTy = IntegerType::get(C, 128);
-      IRBuilder<> Builder(C);
-      Builder.SetInsertPoint(CI->getParent(), CI);
-
-      Value *BC = Builder.CreateBitCast(CI->getArgOperand(0),
-                                        PointerType::getUnqual(IntTy),
-                                        "cast");
-      LoadInst *LI = Builder.CreateLoad(BC, CI->getName());
-      LI->setAlignment(1);      // Unaligned load.
-      BC = Builder.CreateBitCast(LI, VecTy, "new.cast");
-
-      // Fix up all the uses with our new load.
-      if (!CI->use_empty())
-        CI->replaceAllUsesWith(BC);
-
-      // Remove intrinsic.
-      CI->eraseFromParent();
+      
     } else {
       llvm_unreachable("Unknown function for CallInst upgrade.");
     }
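
For reference, the deleted AutoUpgrade path rewrote calls to these
intrinsics into a bitcast plus an align-1 i128 load. In C terms it
amounted to roughly the following (a hypothetical equivalent, shown only
for illustration):

    #include <string.h>
    #include <xmmintrin.h>

    /* An unaligned native load, which is what the removed upgrade
       produced. With this revert the intrinsic call is kept instead and
       handled by the instruction patterns above. */
    __m128 loadu_equiv(const float *p) {
      __m128 v;
      memcpy(&v, p, sizeof v); /* byte-aligned load, like the align-1 IR */
      return v;
    }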