[X86] Implement fusing of MUL+SUBADD into FMSUBADD
This patch turns a shuffle that blends the fadd and fsub results of a common fmul into an fmsubadd node.
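
For reference, here is a sketch of the IR pattern now matched (value names
are illustrative; it mirrors the mul_subadd_pd128 test below, with the
<0,3> shuffle mask inferred from the old vblendpd codegen, which took the
fadd result in even lanes and the fsub result in odd lanes):

  %AB  = fmul <2 x double> %A, %B
  %Sub = fsub <2 x double> %AB, %C
  %Add = fadd <2 x double> %AB, %C
  %Res = shufflevector <2 x double> %Add, <2 x double> %Sub, <2 x i32> <i32 0, i32 3>

With FMA3 this now selects to a single vfmsubadd213pd rather than the old
mul/sub/add/blend sequence. The fold presumably also requires fast-math or
contraction to be allowed, via the #0 attribute set on these tests (not
shown in the hunk context).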
Patch by Dmitry Venikov
Differential Revision: https://reviews.llvm.org/D40335
llvm-svn: 321200
diff --git a/llvm/test/CodeGen/X86/fmsubadd-combine.ll b/llvm/test/CodeGen/X86/fmsubadd-combine.ll
index 814d61e..ca2c61a 100644
--- a/llvm/test/CodeGen/X86/fmsubadd-combine.ll
+++ b/llvm/test/CodeGen/X86/fmsubadd-combine.ll
@@ -8,26 +8,17 @@
define <2 x double> @mul_subadd_pd128(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
; FMA3_256-LABEL: mul_subadd_pd128:
; FMA3_256: # %bb.0: # %entry
-; FMA3_256-NEXT: vmulpd %xmm1, %xmm0, %xmm0
-; FMA3_256-NEXT: vsubpd %xmm2, %xmm0, %xmm1
-; FMA3_256-NEXT: vaddpd %xmm2, %xmm0, %xmm0
-; FMA3_256-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA3_256-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_subadd_pd128:
; FMA3_512: # %bb.0: # %entry
-; FMA3_512-NEXT: vmulpd %xmm1, %xmm0, %xmm0
-; FMA3_512-NEXT: vsubpd %xmm2, %xmm0, %xmm1
-; FMA3_512-NEXT: vaddpd %xmm2, %xmm0, %xmm0
-; FMA3_512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA3_512-NEXT: vfmsubadd213pd %xmm2, %xmm1, %xmm0
; FMA3_512-NEXT: retq
;
; FMA4-LABEL: mul_subadd_pd128:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmulpd %xmm1, %xmm0, %xmm0
-; FMA4-NEXT: vsubpd %xmm2, %xmm0, %xmm1
-; FMA4-NEXT: vaddpd %xmm2, %xmm0, %xmm0
-; FMA4-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; FMA4-NEXT: vfmsubaddpd %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
entry:
%AB = fmul <2 x double> %A, %B
@@ -40,18 +31,12 @@
define <4 x float> @mul_subadd_ps128(<4 x float> %A, <4 x float> %B, <4 x float> %C) #0 {
; FMA3-LABEL: mul_subadd_ps128:
; FMA3: # %bb.0: # %entry
-; FMA3-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; FMA3-NEXT: vsubps %xmm2, %xmm0, %xmm1
-; FMA3-NEXT: vaddps %xmm2, %xmm0, %xmm0
-; FMA3-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; FMA3-NEXT: vfmsubadd213ps %xmm2, %xmm1, %xmm0
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_subadd_ps128:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; FMA4-NEXT: vsubps %xmm2, %xmm0, %xmm1
-; FMA4-NEXT: vaddps %xmm2, %xmm0, %xmm0
-; FMA4-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; FMA4-NEXT: vfmsubaddps %xmm2, %xmm1, %xmm0, %xmm0
; FMA4-NEXT: retq
entry:
%AB = fmul <4 x float> %A, %B
@@ -64,18 +49,12 @@
define <4 x double> @mul_subadd_pd256(<4 x double> %A, <4 x double> %B, <4 x double> %C) #0 {
; FMA3-LABEL: mul_subadd_pd256:
; FMA3: # %bb.0: # %entry
-; FMA3-NEXT: vmulpd %ymm1, %ymm0, %ymm0
-; FMA3-NEXT: vsubpd %ymm2, %ymm0, %ymm1
-; FMA3-NEXT: vaddpd %ymm2, %ymm0, %ymm0
-; FMA3-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; FMA3-NEXT: vfmsubadd213pd %ymm2, %ymm1, %ymm0
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_subadd_pd256:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmulpd %ymm1, %ymm0, %ymm0
-; FMA4-NEXT: vsubpd %ymm2, %ymm0, %ymm1
-; FMA4-NEXT: vaddpd %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3]
+; FMA4-NEXT: vfmsubaddpd %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
entry:
%AB = fmul <4 x double> %A, %B
@@ -88,18 +67,12 @@
define <8 x float> @mul_subadd_ps256(<8 x float> %A, <8 x float> %B, <8 x float> %C) #0 {
; FMA3-LABEL: mul_subadd_ps256:
; FMA3: # %bb.0: # %entry
-; FMA3-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; FMA3-NEXT: vsubps %ymm2, %ymm0, %ymm1
-; FMA3-NEXT: vaddps %ymm2, %ymm0, %ymm0
-; FMA3-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; FMA3-NEXT: vfmsubadd213ps %ymm2, %ymm1, %ymm0
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_subadd_ps256:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; FMA4-NEXT: vsubps %ymm2, %ymm0, %ymm1
-; FMA4-NEXT: vaddps %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
+; FMA4-NEXT: vfmsubaddps %ymm2, %ymm1, %ymm0, %ymm0
; FMA4-NEXT: retq
entry:
%AB = fmul <8 x float> %A, %B
@@ -112,34 +85,19 @@
define <8 x double> @mul_subadd_pd512(<8 x double> %A, <8 x double> %B, <8 x double> %C) #0 {
; FMA3_256-LABEL: mul_subadd_pd512:
; FMA3_256: # %bb.0: # %entry
-; FMA3_256-NEXT: vmulpd %ymm2, %ymm0, %ymm0
-; FMA3_256-NEXT: vmulpd %ymm3, %ymm1, %ymm1
-; FMA3_256-NEXT: vsubpd %ymm5, %ymm1, %ymm2
-; FMA3_256-NEXT: vsubpd %ymm4, %ymm0, %ymm3
-; FMA3_256-NEXT: vaddpd %ymm5, %ymm1, %ymm1
-; FMA3_256-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
-; FMA3_256-NEXT: vaddpd %ymm4, %ymm0, %ymm0
-; FMA3_256-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
+; FMA3_256-NEXT: vfmsubadd213pd %ymm4, %ymm2, %ymm0
+; FMA3_256-NEXT: vfmsubadd213pd %ymm5, %ymm3, %ymm1
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_subadd_pd512:
; FMA3_512: # %bb.0: # %entry
-; FMA3_512-NEXT: vmulpd %zmm1, %zmm0, %zmm0
-; FMA3_512-NEXT: vsubpd %zmm2, %zmm0, %zmm1
-; FMA3_512-NEXT: vaddpd %zmm2, %zmm0, %zmm0
-; FMA3_512-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[1],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[6],zmm1[7]
+; FMA3_512-NEXT: vfmsubadd213pd %zmm2, %zmm1, %zmm0
; FMA3_512-NEXT: retq
;
; FMA4-LABEL: mul_subadd_pd512:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmulpd %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vmulpd %ymm3, %ymm1, %ymm1
-; FMA4-NEXT: vsubpd %ymm5, %ymm1, %ymm2
-; FMA4-NEXT: vsubpd %ymm4, %ymm0, %ymm3
-; FMA4-NEXT: vaddpd %ymm5, %ymm1, %ymm1
-; FMA4-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3]
-; FMA4-NEXT: vaddpd %ymm4, %ymm0, %ymm0
-; FMA4-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3]
+; FMA4-NEXT: vfmsubaddpd %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vfmsubaddpd %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
entry:
%AB = fmul <8 x double> %A, %B
@@ -152,35 +110,19 @@
define <16 x float> @mul_subadd_ps512(<16 x float> %A, <16 x float> %B, <16 x float> %C) #0 {
; FMA3_256-LABEL: mul_subadd_ps512:
; FMA3_256: # %bb.0: # %entry
-; FMA3_256-NEXT: vmulps %ymm2, %ymm0, %ymm0
-; FMA3_256-NEXT: vmulps %ymm3, %ymm1, %ymm1
-; FMA3_256-NEXT: vsubps %ymm5, %ymm1, %ymm2
-; FMA3_256-NEXT: vsubps %ymm4, %ymm0, %ymm3
-; FMA3_256-NEXT: vaddps %ymm5, %ymm1, %ymm1
-; FMA3_256-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; FMA3_256-NEXT: vaddps %ymm4, %ymm0, %ymm0
-; FMA3_256-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
+; FMA3_256-NEXT: vfmsubadd213ps %ymm4, %ymm2, %ymm0
+; FMA3_256-NEXT: vfmsubadd213ps %ymm5, %ymm3, %ymm1
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_subadd_ps512:
; FMA3_512: # %bb.0: # %entry
-; FMA3_512-NEXT: vmulps %zmm1, %zmm0, %zmm1
-; FMA3_512-NEXT: vaddps %zmm2, %zmm1, %zmm0
-; FMA3_512-NEXT: movw $-21846, %ax # imm = 0xAAAA
-; FMA3_512-NEXT: kmovw %eax, %k1
-; FMA3_512-NEXT: vsubps %zmm2, %zmm1, %zmm0 {%k1}
+; FMA3_512-NEXT: vfmsubadd213ps %zmm2, %zmm1, %zmm0
; FMA3_512-NEXT: retq
;
; FMA4-LABEL: mul_subadd_ps512:
; FMA4: # %bb.0: # %entry
-; FMA4-NEXT: vmulps %ymm2, %ymm0, %ymm0
-; FMA4-NEXT: vmulps %ymm3, %ymm1, %ymm1
-; FMA4-NEXT: vsubps %ymm5, %ymm1, %ymm2
-; FMA4-NEXT: vsubps %ymm4, %ymm0, %ymm3
-; FMA4-NEXT: vaddps %ymm5, %ymm1, %ymm1
-; FMA4-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
-; FMA4-NEXT: vaddps %ymm4, %ymm0, %ymm0
-; FMA4-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
+; FMA4-NEXT: vfmsubaddps %ymm4, %ymm2, %ymm0, %ymm0
+; FMA4-NEXT: vfmsubaddps %ymm5, %ymm3, %ymm1, %ymm1
; FMA4-NEXT: retq
entry:
%AB = fmul <16 x float> %A, %B