Have SPU handle half-vector (v2i32/v2f32) stores aligned to 8 bytes.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@110576 91177308-0d34-0410-b5e6-96231b3b80d8
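
For context: the stores this change targets are 64-bit half-vector stores
(half of a 16-byte SPU register) with only 8-byte alignment. A minimal
source-level sketch that produces such a store, assuming the GCC/Clang
vector_size extension (the typedef and function names are illustrative,
not part of this patch):

    // Hypothetical code that yields an ISD::STORE of MVT::v2f32 with
    // align 8 when compiled for CellSPU.
    typedef float vec2f __attribute__((vector_size(8)));

    void store_half(vec2f *p, vec2f v) {
      *p = v;
    }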
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 7a6bb0f..1dc969b 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -470,6 +470,9 @@
 
   setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
 
+  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
+  setOperationAction(ISD::STORE, MVT::v2f32, Custom);
+
   setShiftAmountType(MVT::i32);
   setBooleanContents(ZeroOrNegativeOneBooleanContent);
 
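Note: marking ISD::STORE as Custom for v2i32/v2f32 is what routes these
nodes into the target's store lowering during legalization. A simplified
sketch of the dispatch path (the real SPUTargetLowering::LowerOperation
in this file handles many more opcodes, and the helper's exact signature
may differ):

    // Sketch only: Custom-marked nodes come back to the target hook.
    SDValue SPUTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {
      switch (Op.getOpcode()) {
      case ISD::STORE:
        return LowerSTORE(Op, DAG);  // now also reached for v2i32/v2f32
      // ... other custom-lowered opcodes elided ...
      default:
        llvm_unreachable("unexpected custom lowering");
      }
    }
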
@@ -518,6 +521,8 @@
     node_names[(unsigned) SPUISD::ADD64_MARKER] = "SPUISD::ADD64_MARKER";
     node_names[(unsigned) SPUISD::SUB64_MARKER] = "SPUISD::SUB64_MARKER";
     node_names[(unsigned) SPUISD::MUL64_MARKER] = "SPUISD::MUL64_MARKER";
+    node_names[(unsigned) SPUISD::HALF2VEC] = "SPUISD::HALF2VEC";
+    node_names[(unsigned) SPUISD::VEC2HALF] = "SPUISD::VEC2HALF";
   }
 
   std::map<unsigned, const char *>::iterator i = node_names.find(Opcode);
@@ -738,12 +743,14 @@
   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
   DebugLoc dl = Op.getDebugLoc();
   unsigned alignment = SN->getAlignment();
+  const bool isVec = VT.isVector();
+  EVT eltTy = isVec ? VT.getVectorElementType() : VT;
 
   switch (SN->getAddressingMode()) {
   case ISD::UNINDEXED: {
     // The vector type we really want to load from the 16-byte chunk.
     EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
-                                 VT, (128 / VT.getSizeInBits()));
+                                 eltTy, (128 / eltTy.getSizeInBits()));
 
     SDValue alignLoadVec;
     SDValue basePtr = SN->getBasePtr();
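
The switch from VT to its element type matters as soon as VT is itself a
vector. A worked example of the arithmetic above, assuming a v2f32 store:

    // VT    = MVT::v2f32  ->  eltTy = MVT::f32
    // 128 / eltTy.getSizeInBits() = 128 / 32 = 4
    // vecVT = v4f32, i.e. one full 16-byte SPU register
    //
    // With the old code the element type would have been v2f32 itself,
    // and EVT::getVectorVT would be asked for a "vector of 2 x v2f32",
    // which is not a representable value type.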
@@ -752,7 +759,6 @@
 
     if (alignment == 16) {
       ConstantSDNode *CN;
-
       // Special cases for a known aligned load to simplify the base pointer
       // and insertion byte:
       if (basePtr.getOpcode() == ISD::ADD
@@ -776,6 +782,9 @@
         insertEltOffs = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
                                     basePtr,
                                     DAG.getConstant(0, PtrVT));
+        basePtr = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
+                              basePtr,
+                              DAG.getConstant(0, PtrVT));
       }
     } else {
       // Unaligned load: must be more pessimistic about addressing modes:
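
The added wrap of basePtr mirrors the existing treatment of insertEltOffs:
in the aligned fall-through case both pointers are normalized to the
backend's reg+offset form, so later matching sees a uniform shape. This
reading is an assumption drawn from how SPUISD::IndirectAddr is used
elsewhere in this file; schematically:

    // insertEltOffs = IndirectAddr(basePtr, 0)  // byte offset for the mask
    // basePtr       = IndirectAddr(basePtr, 0)  // address of 16-byte chunk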
@@ -812,8 +821,8 @@
                                   DAG.getConstant(0, PtrVT));
     }
 
-    // Re-emit as a v16i8 vector load
-    alignLoadVec = DAG.getLoad(MVT::v16i8, dl, the_chain, basePtr,
+    // Load the 16-byte chunk that we will store into.
+    alignLoadVec = DAG.getLoad(vecVT, dl, the_chain, basePtr,
                                SN->getSrcValue(), SN->getSrcValueOffset(),
                                SN->isVolatile(), SN->isNonTemporal(), 16);
 
@@ -844,11 +853,17 @@
       }
 #endif
 
-    SDValue insertEltOp =
-            DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT, insertEltOffs);
-    SDValue vectorizeOp =
-            DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT, theValue);
-
+    SDValue insertEltOp;
+    SDValue vectorizeOp;
+    if (isVec) {
+      // FIXME: this only works when the stored vector is 64 bits wide!
+      insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, MVT::v2i64,
+                                insertEltOffs);
+      vectorizeOp = DAG.getNode(SPUISD::HALF2VEC, dl, vecVT, theValue);
+    } else {
+      insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT, insertEltOffs);
+      vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT, theValue);
+    }
     result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
                          vectorizeOp, alignLoadVec,
                          DAG.getNode(ISD::BIT_CONVERT, dl,
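
Taken together, the lowering is a read-modify-write: load the enclosing
16-byte chunk, splice the stored value in via SHUFB, and write the chunk
back, since SPU hardware only performs aligned 16-byte memory accesses.
A self-contained scalar analogue of the idiom, purely illustrative
(function and variable names are hypothetical, not SPU intrinsics):

    #include <cstdint>
    #include <cstring>

    // Scalar model of the chunked store: the "hardware" can only move
    // whole 16-byte blocks, so an 8-byte store becomes load/splice/store.
    // addr is assumed 8-byte aligned (as in this commit), so the
    // insertion offset within the chunk is either 0 or 8.
    void store_half_into_chunk(uint8_t *addr, const uint8_t value[8]) {
      uintptr_t a = reinterpret_cast<uintptr_t>(addr);
      uint8_t *chunk = reinterpret_cast<uint8_t *>(a & ~uintptr_t(15));
      unsigned offset = static_cast<unsigned>(a & 15);  // 0 or 8 here
      uint8_t buf[16];
      std::memcpy(buf, chunk, 16);          // alignLoadVec: load the chunk
      std::memcpy(buf + offset, value, 8);  // SHUFB: splice new bytes in
      std::memcpy(chunk, buf, 16);          // store the whole chunk back
    }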