[x86] enable machine combiner reassociations for scalar double-precision adds
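
The machine combiner could already reassociate scalar single-precision
adds and multiplies; this patch adds the SSE and AVX scalar
double-precision add opcodes (ADDSDrr / VADDSDrr) to the same list.

As the new test case below shows, reassociating a chain like:

  %t0 = fdiv double %x0, %x1
  %t1 = fadd double %x2, %t0
  %t2 = fadd double %x3, %t1

lets %x2 + %x3 execute in parallel with the long-latency divide,
rather than serializing both adds behind its result.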

llvm-svn: 241871
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 5484ae9..fdfdac9 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6408,7 +6408,9 @@
 //       2. Other math / logic operations (and, or)
 static bool isAssociativeAndCommutative(unsigned Opcode) {
   switch (Opcode) {
+  case X86::ADDSDrr:
   case X86::ADDSSrr:
+  case X86::VADDSDrr:
   case X86::VADDSSrr:
   case X86::MULSSrr:
   case X86::VMULSSrr:
diff --git a/llvm/test/CodeGen/X86/machine-combiner.ll b/llvm/test/CodeGen/X86/machine-combiner.ll
index 2286da7..ae059a1 100644
--- a/llvm/test/CodeGen/X86/machine-combiner.ll
+++ b/llvm/test/CodeGen/X86/machine-combiner.ll
@@ -144,7 +144,7 @@
   ret float %t2
 }
 
-; Verify that SSE and AVX scalar single precison multiplies are reassociated.
+; Verify that SSE and AVX scalar single-precision multiplies are reassociated.
 
 define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_muls1:
@@ -165,3 +165,25 @@
   %t2 = fmul float %x3, %t1
   ret float %t2
 }
+
+; Verify that SSE and AVX scalar double-precision adds are reassociated.
+
+define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
+; SSE-LABEL: reassociate_adds_double:
+; SSE:       # BB#0:
+; SSE-NEXT:    divsd %xmm1, %xmm0
+; SSE-NEXT:    addsd %xmm3, %xmm2
+; SSE-NEXT:    addsd %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: reassociate_adds_double:
+; AVX:       # BB#0:
+; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vaddsd %xmm3, %xmm2, %xmm1
+; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %t0 = fdiv double %x0, %x1
+  %t1 = fadd double %x2, %t0
+  %t2 = fadd double %x3, %t1
+  ret double %t2
+}