[X86] Improve CodeGen instruction selection for LEAs (re-applied after making the required review changes).

Summary:
   1/  Operand folding during complex pattern matching for LEAs has been
       extended so that it promotes Scale to accommodate an operand that
       appears more than once in the DAG.
       e.g.
         T1 = A + B
         T2 = T1 + 10
         T3 = T2 + A
       For the above DAG rooted at T3, the X86AddressMode will now look like
         Base = B, Index = A, Scale = 2, Disp = 10
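       To see why, expand the DAG arithmetically:
         T3 = T2 + A = (T1 + 10) + A = (A + B + 10) + A = B + 2*A + 10
       which is exactly Base + Scale*Index + Disp with the values above.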

   2/  During the OptimizeLEAPass, further down the pipeline, factorization
       is now performed over LEAs so that complex LEAs (having 3 operands)
       can be factored out whenever an opportunity exists.
       e.g.
         leal 1(%rax,%rcx,1), %rdx
         leal 1(%rax,%rcx,2), %rcx
       will be factored as follows
         leal 1(%rax,%rcx,1), %rdx
         leal (%rdx,%rcx), %edx
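       The factorization is valid because the second LEA computes
         1 + %rax + 2*%rcx = (1 + %rax + %rcx) + %rcx = %rdx + %rcx
       i.e. the scale-2 LEA is re-expressed in terms of the scale-1 LEA
       already available in %rdx.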

   3/ Aggressive operand folding during AM-based selection of LEAs is now
      loop-sensitive, avoiding the creation of any complex LEA within a loop.
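      e.g. (see the lea-opt-cse2.ll diff below) the 3-operand LEA inside
      the loop body
        leal 1(%rax,%rcx), %edx
      is replaced by a plain add/inc sequence
        addl %eax, %ecx
        incl %ecx
      (modulo register allocation differences).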

Reviewers: lsaba, RKSimon, craig.topper, qcolombet, jmolloy

Reviewed By: lsaba

Subscribers: jmolloy, spatel, igorb, llvm-commits

Differential Revision: https://reviews.llvm.org/D35014

llvm-svn: 314886
diff --git a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
index eb7eef7..ff4966b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/callingconv.ll
@@ -405,7 +405,7 @@
 ; X32-NEXT:    movl 4(%ecx), %ecx
 ; X32-NEXT:    movl %eax, (%esp)
 ; X32-NEXT:    movl $4, %eax
-; X32-NEXT:    leal (%esp,%eax), %eax
+; X32-NEXT:    addl %esp, %eax
 ; X32-NEXT:    movl %edx, 4(%esp)
 ; X32-NEXT:    movl %ecx, 4(%eax)
 ; X32-NEXT:    calll variadic_callee
diff --git a/llvm/test/CodeGen/X86/GlobalISel/gep.ll b/llvm/test/CodeGen/X86/GlobalISel/gep.ll
index 94da9fb..c423fd3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/gep.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/gep.ll
@@ -5,10 +5,10 @@
 define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
 ; X64_GISEL-LABEL: test_gep_i8:
 ; X64_GISEL:       # BB#0:
-; X64_GISEL-NEXT:    movq $4, %rax
-; X64_GISEL-NEXT:    movsbq %sil, %rcx
-; X64_GISEL-NEXT:    imulq %rax, %rcx
-; X64_GISEL-NEXT:    leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT:    movq $4, %rcx
+; X64_GISEL-NEXT:    movsbq %sil, %rax
+; X64_GISEL-NEXT:    imulq %rcx, %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i8:
@@ -25,7 +25,7 @@
 ; X64_GISEL-LABEL: test_gep_i8_const:
 ; X64_GISEL:       # BB#0:
 ; X64_GISEL-NEXT:    movq $80, %rax
-; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i8_const:
@@ -39,10 +39,10 @@
 define i32* @test_gep_i16(i32 *%arr, i16 %ind) {
 ; X64_GISEL-LABEL: test_gep_i16:
 ; X64_GISEL:       # BB#0:
-; X64_GISEL-NEXT:    movq $4, %rax
-; X64_GISEL-NEXT:    movswq %si, %rcx
-; X64_GISEL-NEXT:    imulq %rax, %rcx
-; X64_GISEL-NEXT:    leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT:    movq $4, %rcx
+; X64_GISEL-NEXT:    movswq %si, %rax
+; X64_GISEL-NEXT:    imulq %rcx, %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i16:
@@ -59,7 +59,7 @@
 ; X64_GISEL-LABEL: test_gep_i16_const:
 ; X64_GISEL:       # BB#0:
 ; X64_GISEL-NEXT:    movq $80, %rax
-; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i16_const:
@@ -73,10 +73,10 @@
 define i32* @test_gep_i32(i32 *%arr, i32 %ind) {
 ; X64_GISEL-LABEL: test_gep_i32:
 ; X64_GISEL:       # BB#0:
-; X64_GISEL-NEXT:    movq $4, %rax
-; X64_GISEL-NEXT:    movslq %esi, %rcx
-; X64_GISEL-NEXT:    imulq %rax, %rcx
-; X64_GISEL-NEXT:    leaq (%rdi,%rcx), %rax
+; X64_GISEL-NEXT:    movq $4, %rcx
+; X64_GISEL-NEXT:    movslq %esi, %rax
+; X64_GISEL-NEXT:    imulq %rcx, %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i32:
@@ -92,7 +92,7 @@
 ; X64_GISEL-LABEL: test_gep_i32_const:
 ; X64_GISEL:       # BB#0:
 ; X64_GISEL-NEXT:    movq $20, %rax
-; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i32_const:
@@ -108,7 +108,7 @@
 ; X64_GISEL:       # BB#0:
 ; X64_GISEL-NEXT:    movq $4, %rax
 ; X64_GISEL-NEXT:    imulq %rsi, %rax
-; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i64:
@@ -123,7 +123,7 @@
 ; X64_GISEL-LABEL: test_gep_i64_const:
 ; X64_GISEL:       # BB#0:
 ; X64_GISEL-NEXT:    movq $20, %rax
-; X64_GISEL-NEXT:    leaq (%rdi,%rax), %rax
+; X64_GISEL-NEXT:    addq %rdi, %rax
 ; X64_GISEL-NEXT:    retq
 ;
 ; X64-LABEL: test_gep_i64_const:
diff --git a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
index 2097a3b..c73a431 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar.ll
@@ -181,7 +181,7 @@
 ; ALL-LABEL: test_gep_folding_largeGepIndex:
 ; ALL:       # BB#0:
 ; ALL-NEXT:    movabsq $228719476720, %rax # imm = 0x3540BE3FF0
-; ALL-NEXT:    leaq (%rdi,%rax), %rax
+; ALL-NEXT:    addq %rdi, %rax
 ; ALL-NEXT:    movl %esi, (%rax)
 ; ALL-NEXT:    movl (%rax), %eax
 ; ALL-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse1.ll b/llvm/test/CodeGen/X86/lea-opt-cse1.ll
index 86218a6..857a23f 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse1.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse1.ll
@@ -9,29 +9,21 @@
 ; X64:       # BB#0: # %entry
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    movl 16(%rdi), %ecx
-; X64-NEXT:    leal (%rax,%rcx), %edx
 ; X64-NEXT:    leal 1(%rax,%rcx), %eax
 ; X64-NEXT:    movl %eax, 12(%rdi)
-; X64-NEXT:    leal 1(%rcx,%rdx), %eax
+; X64-NEXT:    addq %rcx, %eax
 ; X64-NEXT:    movl %eax, 16(%rdi)
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: test_func:
 ; X86:       # BB#0: # %entry
-; X86-NEXT:    pushl %esi
-; X86-NEXT:  .Lcfi0:
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:  .Lcfi1:
-; X86-NEXT:    .cfi_offset %esi, -8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl (%eax), %ecx
 ; X86-NEXT:    movl 16(%eax), %edx
-; X86-NEXT:    leal 1(%ecx,%edx), %esi
+; X86-NEXT:    leal 1(%ecx,%edx), %ecx
+; X86-NEXT:    movl %ecx, 12(%eax)
 ; X86-NEXT:    addl %edx, %ecx
-; X86-NEXT:    movl %esi, 12(%eax)
-; X86-NEXT:    leal 1(%edx,%ecx), %ecx
 ; X86-NEXT:    movl %ecx, 16(%eax)
-; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
  entry:
    %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse2.ll b/llvm/test/CodeGen/X86/lea-opt-cse2.ll
index 573b93d..73aeea0 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse2.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse2.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown   | FileCheck %s -check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown  -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown  -mattr=+slow-3ops-lea  | FileCheck %s -check-prefix=X86
 
 %struct.SA = type { i32 , i32 , i32 , i32 , i32};
 
@@ -10,47 +10,41 @@
 ; X64-NEXT:    .p2align 4, 0x90
 ; X64-NEXT:  .LBB0_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
-; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    movl 16(%rdi), %ecx
-; X64-NEXT:    leal 1(%rax,%rcx), %edx
-; X64-NEXT:    movl %edx, 12(%rdi)
+; X64-NEXT:    movl 16(%rdi), %eax
+; X64-NEXT:    movl (%rdi), %ecx
+; X64-NEXT:    addl %eax, %ecx
+; X64-NEXT:    incl %ecx
+; X64-NEXT:    movl %ecx, 12(%rdi)
 ; X64-NEXT:    decl %esi
 ; X64-NEXT:    jne .LBB0_1
 ; X64-NEXT:  # BB#2: # %exit
-; X64-NEXT:    addl %ecx, %eax
-; X64-NEXT:    leal 1(%rcx,%rax), %eax
-; X64-NEXT:    movl %eax, 16(%rdi)
+; X64-NEXT:    addl %eax, %ecx
+; X64-NEXT:    movl %ecx, 16(%rdi)
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo:
 ; X86:       # BB#0: # %entry
-; X86-NEXT:    pushl %edi
+; X86-NEXT:    pushl %esi
 ; X86-NEXT:  .Lcfi0:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:    pushl %esi
 ; X86-NEXT:  .Lcfi1:
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:  .Lcfi2:
-; X86-NEXT:    .cfi_offset %esi, -12
-; X86-NEXT:  .Lcfi3:
-; X86-NEXT:    .cfi_offset %edi, -8
+; X86-NEXT:    .cfi_offset %esi, -8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    .p2align 4, 0x90
 ; X86-NEXT:  .LBB0_1: # %loop
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    movl (%eax), %edx
-; X86-NEXT:    movl 16(%eax), %esi
-; X86-NEXT:    leal 1(%edx,%esi), %edi
-; X86-NEXT:    movl %edi, 12(%eax)
+; X86-NEXT:    movl 16(%eax), %edx
+; X86-NEXT:    movl (%eax), %esi
+; X86-NEXT:    addl %edx, %esi
+; X86-NEXT:    incl %esi
+; X86-NEXT:    movl %esi, 12(%eax)
 ; X86-NEXT:    decl %ecx
 ; X86-NEXT:    jne .LBB0_1
 ; X86-NEXT:  # BB#2: # %exit
-; X86-NEXT:    addl %esi, %edx
-; X86-NEXT:    leal 1(%esi,%edx), %ecx
-; X86-NEXT:    movl %ecx, 16(%eax)
+; X86-NEXT:    addl %edx, %esi
+; X86-NEXT:    movl %esi, 16(%eax)
 ; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
 ; X86-NEXT:    retl
  entry:
    br label %loop
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse3.ll b/llvm/test/CodeGen/X86/lea-opt-cse3.ll
index 4e030fb..7fabd58 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse3.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse3.ll
@@ -8,7 +8,7 @@
 ; X64-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
 ; X64-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; X64-NEXT:    leal 4(%rdi,%rsi,2), %ecx
-; X64-NEXT:    leal 4(%rdi,%rsi,4), %eax
+; X64-NEXT:    leal (%ecx,%rsi,2), %eax
 ; X64-NEXT:    imull %ecx, %eax
 ; X64-NEXT:    retq
 ;
@@ -16,9 +16,9 @@
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    leal 4(%ecx,%eax,2), %edx
-; X86-NEXT:    leal 4(%ecx,%eax,4), %eax
-; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    leal 4(%ecx,%eax,2), %ecx
+; X86-NEXT:    leal (%ecx,%eax,2), %eax
+; X86-NEXT:    imull %ecx, %eax
 ; X86-NEXT:    retl
 entry:
   %mul = shl i32 %b, 1
@@ -36,7 +36,7 @@
 ; X64-NEXT:    # kill: %ESI<def> %ESI<kill> %RSI<def>
 ; X64-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; X64-NEXT:    leal 4(%rdi,%rsi,4), %ecx
-; X64-NEXT:    leal 4(%rdi,%rsi,8), %eax
+; X64-NEXT:    leal (%ecx,%rsi,4), %eax
 ; X64-NEXT:    imull %ecx, %eax
 ; X64-NEXT:    retq
 ;
@@ -44,9 +44,9 @@
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    leal 4(%ecx,%eax,4), %edx
-; X86-NEXT:    leal 4(%ecx,%eax,8), %eax
-; X86-NEXT:    imull %edx, %eax
+; X86-NEXT:    leal 4(%ecx,%eax,4), %ecx
+; X86-NEXT:    leal (%ecx,%eax,4), %eax
+; X86-NEXT:    imull %ecx, %eax
 ; X86-NEXT:    retl
 entry:
   %mul = shl i32 %b, 2
@@ -68,31 +68,23 @@
 ; X64-NEXT:    cmpl $10, %ecx
 ; X64-NEXT:    je .LBB2_2
 ; X64-NEXT:  # BB#1: # %mid
-; X64-NEXT:    leal 4(%rdi,%rsi,8), %eax
-; X64-NEXT:    imull %eax, %ecx
-; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:    leal (%ecx,%rsi,4), %eax
+; X64-NEXT:    imull %ecx, %eax
 ; X64-NEXT:  .LBB2_2: # %exit
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo1_mult_basic_blocks:
 ; X86:       # BB#0: # %entry
-; X86-NEXT:    pushl %esi
-; X86-NEXT:  .Lcfi0:
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:  .Lcfi1:
-; X86-NEXT:    .cfi_offset %esi, -8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    leal 4(%esi,%edx,4), %ecx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal 4(%eax,%edx,4), %ecx
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    cmpl $10, %ecx
 ; X86-NEXT:    je .LBB2_2
 ; X86-NEXT:  # BB#1: # %mid
-; X86-NEXT:    leal 4(%esi,%edx,8), %eax
-; X86-NEXT:    imull %eax, %ecx
-; X86-NEXT:    movl %ecx, %eax
+; X86-NEXT:    leal (%ecx,%edx,4), %eax
+; X86-NEXT:    imull %ecx, %eax
 ; X86-NEXT:  .LBB2_2: # %exit
-; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 entry:
   %mul = shl i32 %b, 2
@@ -131,9 +123,9 @@
 ; X86-LABEL: foo1_mult_basic_blocks_illegal_scale:
 ; X86:       # BB#0: # %entry
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:  .Lcfi2:
+; X86-NEXT:  .Lcfi0:
 ; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:  .Lcfi3:
+; X86-NEXT:  .Lcfi1:
 ; X86-NEXT:    .cfi_offset %esi, -8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
diff --git a/llvm/test/CodeGen/X86/lea-opt-cse4.ll b/llvm/test/CodeGen/X86/lea-opt-cse4.ll
index d7db0a2..f8697bf 100644
--- a/llvm/test/CodeGen/X86/lea-opt-cse4.ll
+++ b/llvm/test/CodeGen/X86/lea-opt-cse4.ll
@@ -1,43 +1,31 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i686-unknown   | FileCheck %s -check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+slow-3ops-lea | FileCheck %s -check-prefix=X86
 
 %struct.SA = type { i32 , i32 , i32 , i32 , i32};
 
 define void @foo(%struct.SA* nocapture %ctx, i32 %n) local_unnamed_addr #0 {
 ; X64-LABEL: foo:
 ; X64:       # BB#0: # %entry
-; X64-NEXT:    movl 16(%rdi), %eax
-; X64-NEXT:    movl (%rdi), %ecx
-; X64-NEXT:    addl %eax, %ecx
-; X64-NEXT:    addl %eax, %ecx
-; X64-NEXT:    addl %eax, %ecx
-; X64-NEXT:    leal (%rcx,%rax), %edx
-; X64-NEXT:    leal 1(%rax,%rcx), %ecx
-; X64-NEXT:    movl %ecx, 12(%rdi)
-; X64-NEXT:    leal 1(%rax,%rdx), %eax
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    movl 16(%rdi), %ecx
+; X64-NEXT:    leal (%rax,%rcx,4), %eax
+; X64-NEXT:    addl $1, %eax
+; X64-NEXT:    movl %eax, 12(%rdi)
+; X64-NEXT:    addl %ecx, %eax
 ; X64-NEXT:    movl %eax, 16(%rdi)
 ; X64-NEXT:    retq
 ;
 ; X86-LABEL: foo:
 ; X86:       # BB#0: # %entry
-; X86-NEXT:    pushl %esi
-; X86-NEXT:  .Lcfi0:
-; X86-NEXT:    .cfi_def_cfa_offset 8
-; X86-NEXT:  .Lcfi1:
-; X86-NEXT:    .cfi_offset %esi, -8
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    movl 16(%eax), %ecx
-; X86-NEXT:    movl (%eax), %edx
-; X86-NEXT:    addl %ecx, %edx
-; X86-NEXT:    addl %ecx, %edx
-; X86-NEXT:    addl %ecx, %edx
-; X86-NEXT:    leal 1(%ecx,%edx), %esi
-; X86-NEXT:    addl %ecx, %edx
-; X86-NEXT:    movl %esi, 12(%eax)
-; X86-NEXT:    leal 1(%ecx,%edx), %ecx
+; X86-NEXT:    movl (%eax), %ecx
+; X86-NEXT:    movl 16(%eax), %edx
+; X86-NEXT:    leal (%ecx,%edx,4), %ecx
+; X86-NEXT:    addl $1, %ecx
+; X86-NEXT:    movl %ecx, 12(%eax)
+; X86-NEXT:    addl %edx, %ecx
 ; X86-NEXT:    movl %ecx, 16(%eax)
-; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
  entry:
    %h0 = getelementptr inbounds %struct.SA, %struct.SA* %ctx, i64 0, i32 0
@@ -64,15 +52,15 @@
 ; X64-NEXT:    .p2align 4, 0x90
 ; X64-NEXT:  .LBB1_1: # %loop
 ; X64-NEXT:    # =>This Inner Loop Header: Depth=1
-; X64-NEXT:    movl (%rdi), %ecx
 ; X64-NEXT:    movl 16(%rdi), %eax
-; X64-NEXT:    leal 1(%rcx,%rax), %edx
-; X64-NEXT:    movl %edx, 12(%rdi)
+; X64-NEXT:    movl (%rdi), %ecx
+; X64-NEXT:    addl %eax, %ecx
+; X64-NEXT:    incl %ecx
+; X64-NEXT:    movl %ecx, 12(%rdi)
 ; X64-NEXT:    decl %esi
 ; X64-NEXT:    jne .LBB1_1
 ; X64-NEXT:  # BB#2: # %exit
 ; X64-NEXT:    addl %eax, %ecx
-; X64-NEXT:    leal 1(%rax,%rcx), %ecx
 ; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    addl %eax, %ecx
 ; X64-NEXT:    addl %eax, %ecx
@@ -84,30 +72,25 @@
 ;
 ; X86-LABEL: foo_loop:
 ; X86:       # BB#0: # %entry
-; X86-NEXT:    pushl %edi
-; X86-NEXT:  .Lcfi2:
-; X86-NEXT:    .cfi_def_cfa_offset 8
 ; X86-NEXT:    pushl %esi
-; X86-NEXT:  .Lcfi3:
-; X86-NEXT:    .cfi_def_cfa_offset 12
-; X86-NEXT:  .Lcfi4:
-; X86-NEXT:    .cfi_offset %esi, -12
-; X86-NEXT:  .Lcfi5:
-; X86-NEXT:    .cfi_offset %edi, -8
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:  .Lcfi0:
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:  .Lcfi1:
+; X86-NEXT:    .cfi_offset %esi, -8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    .p2align 4, 0x90
 ; X86-NEXT:  .LBB1_1: # %loop
 ; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    movl (%eax), %esi
 ; X86-NEXT:    movl 16(%eax), %ecx
-; X86-NEXT:    leal 1(%esi,%ecx), %edi
-; X86-NEXT:    movl %edi, 12(%eax)
-; X86-NEXT:    decl %edx
+; X86-NEXT:    movl (%eax), %edx
+; X86-NEXT:    addl %ecx, %edx
+; X86-NEXT:    incl %edx
+; X86-NEXT:    movl %edx, 12(%eax)
+; X86-NEXT:    decl %esi
 ; X86-NEXT:    jne .LBB1_1
 ; X86-NEXT:  # BB#2: # %exit
-; X86-NEXT:    addl %ecx, %esi
-; X86-NEXT:    leal 1(%ecx,%esi), %edx
+; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    addl %ecx, %edx
@@ -116,7 +99,6 @@
 ; X86-NEXT:    addl %ecx, %edx
 ; X86-NEXT:    movl %edx, 16(%eax)
 ; X86-NEXT:    popl %esi
-; X86-NEXT:    popl %edi
 ; X86-NEXT:    retl
  entry:
    br label %loop
diff --git a/llvm/test/CodeGen/X86/mul-constant-i16.ll b/llvm/test/CodeGen/X86/mul-constant-i16.ll
index 7b39bfe..ec89304 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i16.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i16.ll
@@ -558,11 +558,10 @@
 define i16 @test_mul_by_29(i16 %x) {
 ; X86-LABEL: test_mul_by_29:
 ; X86:       # BB#0:
-; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    leal (%ecx,%ecx,8), %eax
-; X86-NEXT:    leal (%eax,%eax,2), %eax
-; X86-NEXT:    addl %ecx, %eax
-; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal (%eax,%eax,8), %ecx
+; X86-NEXT:    leal (%ecx,%ecx,2), %ecx
+; X86-NEXT:    leal (%ecx,%eax,2), %eax
 ; X86-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; X86-NEXT:    retl
 ;
@@ -571,8 +570,7 @@
 ; X64-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; X64-NEXT:    leal (%rdi,%rdi,8), %eax
 ; X64-NEXT:    leal (%rax,%rax,2), %eax
-; X64-NEXT:    addl %edi, %eax
-; X64-NEXT:    addl %edi, %eax
+; X64-NEXT:    leal (%rax,%rdi,2), %eax
 ; X64-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; X64-NEXT:    retq
   %mul = mul nsw i16 %x, 29
diff --git a/llvm/test/CodeGen/X86/mul-constant-i32.ll b/llvm/test/CodeGen/X86/mul-constant-i32.ll
index 38599f6..6ef5f93 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i32.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i32.ll
@@ -1457,11 +1457,10 @@
 define i32 @test_mul_by_29(i32 %x) {
 ; X86-LABEL: test_mul_by_29:
 ; X86:       # BB#0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    leal (%ecx,%ecx,8), %eax
-; X86-NEXT:    leal (%eax,%eax,2), %eax
-; X86-NEXT:    addl %ecx, %eax
-; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal (%eax,%eax,8), %ecx
+; X86-NEXT:    leal (%ecx,%ecx,2), %ecx
+; X86-NEXT:    leal (%ecx,%eax,2), %eax
 ; X86-NEXT:    retl
 ;
 ; X64-HSW-LABEL: test_mul_by_29:
@@ -1469,8 +1468,7 @@
 ; X64-HSW-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; X64-HSW-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
 ; X64-HSW-NEXT:    leal (%rax,%rax,2), %eax # sched: [1:0.50]
-; X64-HSW-NEXT:    addl %edi, %eax # sched: [1:0.25]
-; X64-HSW-NEXT:    addl %edi, %eax # sched: [1:0.25]
+; X64-HSW-NEXT:    leal (%rax,%rdi,2), %eax # sched: [1:0.50]
 ; X64-HSW-NEXT:    retq # sched: [2:1.00]
 ;
 ; X64-JAG-LABEL: test_mul_by_29:
@@ -1478,8 +1476,7 @@
 ; X64-JAG-NEXT:    # kill: %EDI<def> %EDI<kill> %RDI<def>
 ; X64-JAG-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
 ; X64-JAG-NEXT:    leal (%rax,%rax,2), %eax # sched: [1:0.50]
-; X64-JAG-NEXT:    addl %edi, %eax # sched: [1:0.50]
-; X64-JAG-NEXT:    addl %edi, %eax # sched: [1:0.50]
+; X64-JAG-NEXT:    leal (%rax,%rdi,2), %eax # sched: [1:0.50]
 ; X64-JAG-NEXT:    retq # sched: [4:1.00]
 ;
 ; X86-NOOPT-LABEL: test_mul_by_29:
diff --git a/llvm/test/CodeGen/X86/mul-constant-i64.ll b/llvm/test/CodeGen/X86/mul-constant-i64.ll
index 98568a6..8e171f9 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i64.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i64.ll
@@ -1523,8 +1523,7 @@
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    leal (%eax,%eax,8), %ecx
 ; X86-NEXT:    leal (%ecx,%ecx,2), %ecx
-; X86-NEXT:    addl %eax, %ecx
-; X86-NEXT:    addl %eax, %ecx
+; X86-NEXT:    leal (%ecx,%eax,2), %ecx
 ; X86-NEXT:    movl $29, %eax
 ; X86-NEXT:    mull {{[0-9]+}}(%esp)
 ; X86-NEXT:    addl %ecx, %edx
@@ -1534,16 +1533,14 @@
 ; X64-HSW:       # BB#0:
 ; X64-HSW-NEXT:    leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
 ; X64-HSW-NEXT:    leaq (%rax,%rax,2), %rax # sched: [1:0.50]
-; X64-HSW-NEXT:    addq %rdi, %rax # sched: [1:0.25]
-; X64-HSW-NEXT:    addq %rdi, %rax # sched: [1:0.25]
+; X64-HSW-NEXT:    leaq (%rax,%rdi,2), %rax # sched: [1:0.50]
 ; X64-HSW-NEXT:    retq # sched: [2:1.00]
 ;
 ; X64-JAG-LABEL: test_mul_by_29:
 ; X64-JAG:       # BB#0:
 ; X64-JAG-NEXT:    leaq (%rdi,%rdi,8), %rax # sched: [1:0.50]
 ; X64-JAG-NEXT:    leaq (%rax,%rax,2), %rax # sched: [1:0.50]
-; X64-JAG-NEXT:    addq %rdi, %rax # sched: [1:0.50]
-; X64-JAG-NEXT:    addq %rdi, %rax # sched: [1:0.50]
+; X64-JAG-NEXT:    leaq (%rax,%rdi,2), %rax # sched: [1:0.50]
 ; X64-JAG-NEXT:    retq # sched: [4:1.00]
 ;
 ; X86-NOOPT-LABEL: test_mul_by_29:
diff --git a/llvm/test/CodeGen/X86/mul-constant-result.ll b/llvm/test/CodeGen/X86/mul-constant-result.ll
index 417b438..d0a6cb6 100644
--- a/llvm/test/CodeGen/X86/mul-constant-result.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-result.ll
@@ -166,8 +166,7 @@
 ; X86-NEXT:  .LBB0_35:
 ; X86-NEXT:    leal (%eax,%eax,8), %ecx
 ; X86-NEXT:    leal (%ecx,%ecx,2), %ecx
-; X86-NEXT:    addl %eax, %ecx
-; X86-NEXT:    addl %ecx, %eax
+; X86-NEXT:    leal (%ecx,%eax,2), %eax
 ; X86-NEXT:    popl %esi
 ; X86-NEXT:    retl
 ; X86-NEXT:  .LBB0_36:
@@ -325,16 +324,17 @@
 ; X64-HSW-NEXT:  .LBB0_31:
 ; X64-HSW-NEXT:    leal (%rax,%rax,8), %ecx
 ; X64-HSW-NEXT:    leal (%rcx,%rcx,2), %ecx
-; X64-HSW-NEXT:    jmp .LBB0_17
-; X64-HSW-NEXT:  .LBB0_32:
-; X64-HSW-NEXT:    leal (%rax,%rax,8), %ecx
-; X64-HSW-NEXT:    leal (%rcx,%rcx,2), %ecx
-; X64-HSW-NEXT:    addl %eax, %ecx
 ; X64-HSW-NEXT:  .LBB0_17:
 ; X64-HSW-NEXT:    addl %eax, %ecx
 ; X64-HSW-NEXT:    movl %ecx, %eax
 ; X64-HSW-NEXT:    # kill: %EAX<def> %EAX<kill> %RAX<kill>
 ; X64-HSW-NEXT:    retq
+; X64-HSW-NEXT:  .LBB0_32:
+; X64-HSW-NEXT:    leal (%rax,%rax,8), %ecx
+; X64-HSW-NEXT:    leal (%rcx,%rcx,2), %ecx
+; X64-HSW-NEXT:    leal (%rcx,%rax,2), %eax
+; X64-HSW-NEXT:    # kill: %EAX<def> %EAX<kill> %RAX<kill>
+; X64-HSW-NEXT:    retq
 ; X64-HSW-NEXT:  .LBB0_33:
 ; X64-HSW-NEXT:    movl %eax, %ecx
 ; X64-HSW-NEXT:    shll $5, %ecx
diff --git a/llvm/test/CodeGen/X86/pr34629.ll b/llvm/test/CodeGen/X86/pr34629.ll
index 031b99d..e37f03b 100644
--- a/llvm/test/CodeGen/X86/pr34629.ll
+++ b/llvm/test/CodeGen/X86/pr34629.ll
@@ -13,8 +13,8 @@
 ; CHECK:       # BB#0: # %entry
 ; CHECK-NEXT:    movq {{.*}}(%rip), %rax
 ; CHECK-NEXT:    leaq (%rax,%rax,4), %rcx
+; CHECK-NEXT:    leaq (%rcx,%rax,4), %rax
 ; CHECK-NEXT:    negq %rcx
-; CHECK-NEXT:    leaq (%rax,%rax,8), %rax
 ; CHECK-NEXT:    leaq (%rax,%rax,4), %rax
 ; CHECK-NEXT:    testq %rax, %rcx
 ; CHECK-NEXT:    je .LBB0_2
diff --git a/llvm/test/CodeGen/X86/pr34634.ll b/llvm/test/CodeGen/X86/pr34634.ll
index 6ebd6d8..6eab7e4 100644
--- a/llvm/test/CodeGen/X86/pr34634.ll
+++ b/llvm/test/CodeGen/X86/pr34634.ll
@@ -14,11 +14,11 @@
 ; CHECK-NEXT:    movslq {{.*}}(%rip), %rax
 ; CHECK-NEXT:    leaq (%rax,%rax,4), %rcx
 ; CHECK-NEXT:    leaq (,%rax,4), %rdx
-; CHECK-NEXT:    movl a(%rdx,%rcx,8), %ecx
-; CHECK-NEXT:    leaq (%rax,%rax,8), %rdx
-; CHECK-NEXT:    leaq (%rdx,%rdx,2), %rdx
-; CHECK-NEXT:    addq %rax, %rdx
-; CHECK-NEXT:    movl %ecx, b(%rdx,%rax,4)
+; CHECK-NEXT:    movl a(%rdx,%rcx,8), %edx
+; CHECK-NEXT:    leaq (%rcx,%rax,4), %rcx
+; CHECK-NEXT:    leaq (%rcx,%rcx,2), %rcx
+; CHECK-NEXT:    addq %rax, %rcx
+; CHECK-NEXT:    movl %edx, b(%rcx,%rax,4)
 ; CHECK-NEXT:    retq
 entry:
   %0 = load i32, i32* @c, align 4, !tbaa !2
@@ -37,11 +37,11 @@
 ; CHECK-NEXT:    movslq {{.*}}(%rip), %rax
 ; CHECK-NEXT:    leaq (%rax,%rax,4), %rcx
 ; CHECK-NEXT:    leaq (,%rax,4), %rdx
-; CHECK-NEXT:    movl a(%rdx,%rcx,8), %ecx
-; CHECK-NEXT:    leaq (%rax,%rax,8), %rdx
-; CHECK-NEXT:    leaq (%rdx,%rdx,2), %rdx
-; CHECK-NEXT:    addq %rax, %rdx
-; CHECK-NEXT:    movl %ecx, b(%rdx,%rax,4)
+; CHECK-NEXT:    movl a(%rdx,%rcx,8), %edx
+; CHECK-NEXT:    leaq (%rcx,%rax,4), %rcx
+; CHECK-NEXT:    leaq (%rcx,%rcx,2), %rcx
+; CHECK-NEXT:    addq %rax, %rcx
+; CHECK-NEXT:    movl %edx, b(%rcx,%rax,4)
 ; CHECK-NEXT:    xorl %eax, %eax
 ; CHECK-NEXT:    retq
 entry:
diff --git a/llvm/test/CodeGen/X86/umul-with-overflow.ll b/llvm/test/CodeGen/X86/umul-with-overflow.ll
index e198a15..55be10f 100644
--- a/llvm/test/CodeGen/X86/umul-with-overflow.ll
+++ b/llvm/test/CodeGen/X86/umul-with-overflow.ll
@@ -40,10 +40,10 @@
 ; X64-NEXT:    leal (%rdi,%rdi), %eax
 ; X64-NEXT:    retq
 entry:
-	%tmp0 = add i32 %b, %a
-	%tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
-	%tmp2 = extractvalue { i32, i1 } %tmp1, 0
-	ret i32 %tmp2
+        %tmp0 = add i32 %b, %a
+        %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 2)
+        %tmp2 = extractvalue { i32, i1 } %tmp1, 0
+        ret i32 %tmp2
 }
 
 define i32 @test3(i32 %a, i32 %b) nounwind readnone {
@@ -64,8 +64,8 @@
 ; X64-NEXT:    mull %ecx
 ; X64-NEXT:    retq
 entry:
-	%tmp0 = add i32 %b, %a
-	%tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
-	%tmp2 = extractvalue { i32, i1 } %tmp1, 0
-	ret i32 %tmp2
+        %tmp0 = add i32 %b, %a
+        %tmp1 = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %tmp0, i32 4)
+        %tmp2 = extractvalue { i32, i1 } %tmp1, 0
+        ret i32 %tmp2
 }
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index 7c01432..3f6d234 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -13,14 +13,14 @@
 ; X64-NEXT: .p2align
 ; X64: %loop
 ; no complex address modes
-; X64-NOT: (%{{[^)]+}},%{{[^)]+}},
+; X64-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
 ;
 ; X32: @simple
 ; no expensive address computation in the preheader
 ; X32-NOT: imul
 ; X32: %loop
 ; no complex address modes
-; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
+; X32-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
 define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
 entry:
   br label %loop
@@ -103,7 +103,7 @@
 ; X32-NOT: mov{{.*}}(%esp){{$}}
 ; X32: %for.body{{$}}
 ; no complex address modes
-; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
+; X32-NOT: [1-9]+(%{{[^)]+}},%{{[^)]+}},
 ; no reloads
 ; X32-NOT: (%esp)
 define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {