Added support to allow clients to custom widen vector operations. For X86,
custom widen vector divide/remainder, since these operations can trap, by
unrolling them into scalar operations and padding the extra lanes of the
resulting widened vector with undefs.
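
As an illustration only, a minimal standalone sketch of the idea in plain C++
(not the actual LLVM lowering code; the names Vec4 and widenSDiv3To4 are made
up for this example): a 3-element signed divide widened to 4 lanes performs
exactly three scalar divides and leaves the padding lane undefined, so the
widening never introduces an extra divide that could trap.

  struct Vec4 { int lane[4]; };      // hypothetical 4-wide result type

  Vec4 widenSDiv3To4(const int *LHS, const int *RHS) {
    Vec4 R;                          // lane[3] is intentionally left "undef"
    for (int i = 0; i != 3; ++i)
      R.lane[i] = LHS[i] / RHS[i];   // exactly three divides, never four
    return R;
  }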


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@90108 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 8567ca4..d80b8ec 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -975,6 +975,19 @@
 
   computeRegisterProperties();
 
+  // Divide and remainder operations have no vector equivalent and can
+  // trap. Do a custom widening for these operations in which we never
+  // generate more divides/remainder than the original vector width.
+  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
+       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
+    if (!isTypeLegal((MVT::SimpleValueType)VT)) {
+      setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Custom);
+      setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Custom);
+      setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Custom);
+      setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Custom);
+    }
+  }
+
   // FIXME: These should be based on subtarget info. Plus, the values should
   // be smaller when we are in optimizing for size mode.
   maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
@@ -7170,6 +7183,14 @@
     Results.push_back(edx.getValue(1));
     return;
   }
+  case ISD::SDIV:
+  case ISD::UDIV:
+  case ISD::SREM:
+  case ISD::UREM: {
+    EVT WidenVT = getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+    Results.push_back(DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements()));
+    return;
+  }
   case ISD::ATOMIC_CMP_SWAP: {
     EVT T = N->getValueType(0);
     assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");