Handle BUILD_VECTOR with all zero elements.
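
A BUILD_VECTOR whose elements are all zero constants (integer 0 or FP
+0.0) is now custom lowered: the node is returned unchanged so the
instruction selector can match it with a register-clearing xor (pxor
for the integer vector types, xorps / xorpd for the FP ones) instead
of materializing the zero vector through memory. fp32imm0 moves from
X86InstrFPStack.td to X86InstrSSE.td next to the new vecimm0 pattern
fragment.

As a rough illustration (the intrinsic spelling is an assumption, not
part of this patch), a source-level zero vector such as

  #include <xmmintrin.h>
  __m128 zero(void) { return _mm_setzero_ps(); }

should now select the VZEROv4f32 alias, i.e. a single

  xorps %xmm0, %xmm0

rather than a 16-byte load of zeros.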
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27056 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 5649d64..e9178f2 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -277,7 +277,7 @@
setOperationAction(ISD::SUB, MVT::v4f32, Legal);
setOperationAction(ISD::MUL, MVT::v4f32, Legal);
setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Expand);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
}
@@ -300,10 +300,11 @@
setOperationAction(ISD::LOAD, MVT::v8i16, Legal);
setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Expand);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Expand);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Expand);
- setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Expand);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
@@ -1529,6 +1530,23 @@
return Mask;
}
+/// isZeroVector - Return true if all elements of the BUILD_VECTOR are zero
+/// constants, i.e. integer 0 or FP +0.0.
+bool X86::isZeroVector(SDNode *N) {
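+ // Each operand of the BUILD_VECTOR is one vector element; every one must
+ // be a zero constant. Non-constant operands (including undef) fail.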
+ for (SDNode::op_iterator I = N->op_begin(), E = N->op_end();
+ I != E; ++I) {
+ if (ConstantFPSDNode *FPC = dyn_cast<ConstantFPSDNode>(*I)) {
+ if (!FPC->isExactlyValue(+0.0))
+ return false;
+ } else if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(*I)) {
+ if (!C->isNullValue())
+ return false;
+ } else
+ return false;
+ }
+
+ return true;
+}
+
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDOperand X86TargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
@@ -2348,10 +2366,28 @@
return SDOperand();
}
- // TODO.
- assert(0 && "TODO");
+ assert(0 && "Unexpected VECTOR_SHUFFLE to lower");
abort();
}
+ case ISD::BUILD_VECTOR: {
+ // Leave an all-zero vector as a BUILD_VECTOR node; the instruction
+ // selector matches it with the register-clearing xor patterns
+ // (vecimm0 / VZERO* in X86InstrSSE.td). Anything else falls back to
+ // the default expansion.
+ if (X86::isZeroVector(Op.Val))
+ return Op;
+ return SDOperand();
+ }
}
}
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index bc4a746..854f76d 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -208,6 +208,9 @@
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
/// instructions.
unsigned getShuffleSHUFImmediate(SDNode *N);
+
+ /// isZeroVector - Return true if all elements of the BUILD_VECTOR are zero
+ /// constants, i.e. integer 0 or FP +0.0.
+ bool isZeroVector(SDNode *N);
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td
index 03b615c..3283ed6 100644
--- a/lib/Target/X86/X86InstrFPStack.td
+++ b/lib/Target/X86/X86InstrFPStack.td
@@ -50,10 +50,6 @@
// FPStack pattern fragments
//===----------------------------------------------------------------------===//
-def fp32imm0 : PatLeaf<(f32 fpimm), [{
- return N->isExactlyValue(+0.0);
-}]>;
-
def fp64imm0 : PatLeaf<(f64 fpimm), [{
return N->isExactlyValue(+0.0);
}]>;
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index e2ec85d..2011f1e 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -45,6 +45,14 @@
def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
+def fp32imm0 : PatLeaf<(f32 fpimm), [{
+ return N->isExactlyValue(+0.0);
+}]>;
+
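+// vecimm0 - Match a BUILD_VECTOR whose elements are all integer 0 or
+// FP +0.0, so a zero vector can be selected as a register-clearing xor.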
+def vecimm0 : PatLeaf<(build_vector), [{
+ return X86::isZeroVector(N);
+}]>;
+
// SHUFFLE_get_shuf_imm xform function: convert vector_shuffle mask to PSHUF*,
// SHUFP* etc. imm.
def SHUFFLE_get_shuf_imm : SDNodeXForm<build_vector, [{
@@ -834,6 +842,25 @@
// Alias Instructions
//===----------------------------------------------------------------------===//
+// Alias instructions that map a zero vector to pxor / xorp* for SSE.
+// FIXME: remove when we can teach regalloc that xor reg, reg is ok.
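+// The integer forms use pxor and so require SSE2; xorps (SSE1) and xorpd
+// (SSE2) cover the FP vector types. MRMInitReg encodes $dst as both
+// operands, giving the xor reg,reg form.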
+def VZEROv16i8 : I<0xEF, MRMInitReg, (ops VR128:$dst),
+ "pxor $dst, $dst", [(set VR128:$dst, (v16i8 vecimm0))]>,
+ Requires<[HasSSE2]>, TB, OpSize;
+def VZEROv8i16 : I<0xEF, MRMInitReg, (ops VR128:$dst),
+ "pxor $dst, $dst", [(set VR128:$dst, (v8i16 vecimm0))]>,
+ Requires<[HasSSE2]>, TB, OpSize;
+def VZEROv4i32 : I<0xEF, MRMInitReg, (ops VR128:$dst),
+ "pxor $dst, $dst", [(set VR128:$dst, (v4i32 vecimm0))]>,
+ Requires<[HasSSE2]>, TB, OpSize;
+def VZEROv2i64 : I<0xEF, MRMInitReg, (ops VR128:$dst),
+ "pxor $dst, $dst", [(set VR128:$dst, (v2i64 vecimm0))]>,
+ Requires<[HasSSE2]>, TB, OpSize;
+def VZEROv4f32 : PSI<0x57, MRMInitReg, (ops VR128:$dst),
+ "xorps $dst, $dst", [(set VR128:$dst, (v4f32 vecimm0))]>;
+def VZEROv2f64 : PDI<0x57, MRMInitReg, (ops VR128:$dst),
+ "xorpd $dst, $dst", [(set VR128:$dst, (v2f64 vecimm0))]>;
+
def FR32ToV4F32 : PSI<0x28, MRMSrcReg, (ops VR128:$dst, FR32:$src),
"movaps {$src, $dst|$dst, $src}",
[(set VR128:$dst,