Revert "[ExpandMemCmp] Split ExpandMemCmp from CodeGen into its own pass."

Reverting due to an undefined reference to `llvm::TargetPassConfig::ID'
when linking on the clang-ppc64le-linux-multistage bot.

This reverts commit eea333c33fa73ad225ef28607795984829f65688.

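For context: the expansion this revert moves back into CodeGen turns
small, fixed-size memcmp calls into explicit loads and compares, and
the updated assembly checks below differ mainly in block numbering,
register choices, and the entry block carrying the %loadbb name. A
minimal sketch of the expanded IR shape for a 3-byte equality compare
(hand-written for illustration; the block names match the labels in the
checks, but the exact instructions are assumed rather than the pass's
verbatim output):

  ; Illustrative sketch only -- not the pass's verbatim output.
  define i32 @memcmp3_sketch(i8* %X, i8* %Y) {
  loadbb:                             ; compare the first two bytes
    %px = bitcast i8* %X to i16*
    %py = bitcast i8* %Y to i16*
    %lx = load i16, i16* %px
    %ly = load i16, i16* %py
    %eq0 = icmp eq i16 %lx, %ly
    br i1 %eq0, label %loadbb1, label %res_block
  loadbb1:                            ; compare the trailing byte
    %gx = getelementptr i8, i8* %X, i64 2
    %gy = getelementptr i8, i8* %Y, i64 2
    %bx = load i8, i8* %gx
    %by = load i8, i8* %gy
    %eq1 = icmp eq i8 %bx, %by
    br i1 %eq1, label %endblock, label %res_block
  res_block:                          ; some byte differed
    br label %endblock
  endblock:
    %res = phi i32 [ 0, %loadbb1 ], [ 1, %res_block ]
    ret i32 %res
  }
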
llvm-svn: 317213
diff --git a/llvm/test/CodeGen/X86/memcmp-optsize.ll b/llvm/test/CodeGen/X86/memcmp-optsize.ll
index 3f5eeba..77d9fa6 100644
--- a/llvm/test/CodeGen/X86/memcmp-optsize.ll
+++ b/llvm/test/CodeGen/X86/memcmp-optsize.ll
@@ -156,36 +156,36 @@
 
 define i1 @length3_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length3_eq:
-; X86:       # BB#0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86:       # BB#0: # %loadbb
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzwl (%ecx), %edx
-; X86-NEXT:    cmpw (%eax), %dx
-; X86-NEXT:    jne .LBB5_2
-; X86-NEXT:  # BB#1: # %loadbb1
-; X86-NEXT:    movb 2(%ecx), %dl
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    cmpb 2(%eax), %dl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movzwl (%eax), %edx
+; X86-NEXT:    cmpw (%ecx), %dx
+; X86-NEXT:    jne .LBB5_1
+; X86-NEXT:  # BB#2: # %loadbb1
+; X86-NEXT:    movb 2(%eax), %dl
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpb 2(%ecx), %dl
 ; X86-NEXT:    je .LBB5_3
-; X86-NEXT:  .LBB5_2: # %res_block
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    incl %ecx
+; X86-NEXT:  .LBB5_1: # %res_block
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    incl %eax
 ; X86-NEXT:  .LBB5_3: # %endblock
-; X86-NEXT:    testl %ecx, %ecx
+; X86-NEXT:    testl %eax, %eax
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length3_eq:
-; X64:       # BB#0:
+; X64:       # BB#0: # %loadbb
 ; X64-NEXT:    movzwl (%rdi), %eax
 ; X64-NEXT:    cmpw (%rsi), %ax
-; X64-NEXT:    jne .LBB5_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:    jne .LBB5_1
+; X64-NEXT:  # BB#2: # %loadbb1
 ; X64-NEXT:    movb 2(%rdi), %cl
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpb 2(%rsi), %cl
 ; X64-NEXT:    je .LBB5_3
-; X64-NEXT:  .LBB5_2: # %res_block
+; X64-NEXT:  .LBB5_1: # %res_block
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:  .LBB5_3: # %endblock
 ; X64-NEXT:    testl %eax, %eax
@@ -314,36 +314,36 @@
 
 define i1 @length5_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length5_eq:
-; X86:       # BB#0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86:       # BB#0: # %loadbb
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl (%ecx), %edx
-; X86-NEXT:    cmpl (%eax), %edx
-; X86-NEXT:    jne .LBB10_2
-; X86-NEXT:  # BB#1: # %loadbb1
-; X86-NEXT:    movb 4(%ecx), %dl
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    cmpb 4(%eax), %dl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %edx
+; X86-NEXT:    cmpl (%ecx), %edx
+; X86-NEXT:    jne .LBB10_1
+; X86-NEXT:  # BB#2: # %loadbb1
+; X86-NEXT:    movb 4(%eax), %dl
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpb 4(%ecx), %dl
 ; X86-NEXT:    je .LBB10_3
-; X86-NEXT:  .LBB10_2: # %res_block
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    incl %ecx
+; X86-NEXT:  .LBB10_1: # %res_block
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    incl %eax
 ; X86-NEXT:  .LBB10_3: # %endblock
-; X86-NEXT:    testl %ecx, %ecx
+; X86-NEXT:    testl %eax, %eax
 ; X86-NEXT:    setne %al
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length5_eq:
-; X64:       # BB#0:
+; X64:       # BB#0: # %loadbb
 ; X64-NEXT:    movl (%rdi), %eax
 ; X64-NEXT:    cmpl (%rsi), %eax
-; X64-NEXT:    jne .LBB10_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:    jne .LBB10_1
+; X64-NEXT:  # BB#2: # %loadbb1
 ; X64-NEXT:    movb 4(%rdi), %cl
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpb 4(%rsi), %cl
 ; X64-NEXT:    je .LBB10_3
-; X64-NEXT:  .LBB10_2: # %res_block
+; X64-NEXT:  .LBB10_1: # %res_block
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:  .LBB10_3: # %endblock
 ; X64-NEXT:    testl %eax, %eax
@@ -356,7 +356,7 @@
 
 define i32 @length8(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length8:
-; X86:       # BB#0:
+; X86:       # BB#0: # %loadbb
 ; X86-NEXT:    pushl %esi
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
@@ -365,8 +365,8 @@
 ; X86-NEXT:    bswapl %ecx
 ; X86-NEXT:    bswapl %edx
 ; X86-NEXT:    cmpl %edx, %ecx
-; X86-NEXT:    jne .LBB11_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:    jne .LBB11_1
+; X86-NEXT:  # BB#2: # %loadbb1
 ; X86-NEXT:    movl 4(%esi), %ecx
 ; X86-NEXT:    movl 4(%eax), %edx
 ; X86-NEXT:    bswapl %ecx
@@ -374,7 +374,7 @@
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    cmpl %edx, %ecx
 ; X86-NEXT:    je .LBB11_3
-; X86-NEXT:  .LBB11_2: # %res_block
+; X86-NEXT:  .LBB11_1: # %res_block
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    cmpl %edx, %ecx
 ; X86-NEXT:    setae %al
@@ -400,22 +400,22 @@
 
 define i1 @length8_eq(i8* %X, i8* %Y) nounwind optsize {
 ; X86-LABEL: length8_eq:
-; X86:       # BB#0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86:       # BB#0: # %loadbb
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl (%ecx), %edx
-; X86-NEXT:    cmpl (%eax), %edx
-; X86-NEXT:    jne .LBB12_2
-; X86-NEXT:  # BB#1: # %loadbb1
-; X86-NEXT:    movl 4(%ecx), %edx
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    cmpl 4(%eax), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %edx
+; X86-NEXT:    cmpl (%ecx), %edx
+; X86-NEXT:    jne .LBB12_1
+; X86-NEXT:  # BB#2: # %loadbb1
+; X86-NEXT:    movl 4(%eax), %edx
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    cmpl 4(%ecx), %edx
 ; X86-NEXT:    je .LBB12_3
-; X86-NEXT:  .LBB12_2: # %res_block
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    incl %ecx
+; X86-NEXT:  .LBB12_1: # %res_block
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    incl %eax
 ; X86-NEXT:  .LBB12_3: # %endblock
-; X86-NEXT:    testl %ecx, %ecx
+; X86-NEXT:    testl %eax, %eax
 ; X86-NEXT:    sete %al
 ; X86-NEXT:    retl
 ;
@@ -432,15 +432,15 @@
 
 define i1 @length8_eq_const(i8* %X) nounwind optsize {
 ; X86-LABEL: length8_eq_const:
-; X86:       # BB#0:
+; X86:       # BB#0: # %loadbb
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT:    cmpl $858927408, (%ecx) # imm = 0x33323130
-; X86-NEXT:    jne .LBB13_2
-; X86-NEXT:  # BB#1: # %loadbb1
+; X86-NEXT:    jne .LBB13_1
+; X86-NEXT:  # BB#2: # %loadbb1
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    cmpl $926299444, 4(%ecx) # imm = 0x37363534
 ; X86-NEXT:    je .LBB13_3
-; X86-NEXT:  .LBB13_2: # %res_block
+; X86-NEXT:  .LBB13_1: # %res_block
 ; X86-NEXT:    xorl %eax, %eax
 ; X86-NEXT:    incl %eax
 ; X86-NEXT:  .LBB13_3: # %endblock
@@ -473,16 +473,16 @@
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length12_eq:
-; X64:       # BB#0:
+; X64:       # BB#0: # %loadbb
 ; X64-NEXT:    movq (%rdi), %rax
 ; X64-NEXT:    cmpq (%rsi), %rax
-; X64-NEXT:    jne .LBB14_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:    jne .LBB14_1
+; X64-NEXT:  # BB#2: # %loadbb1
 ; X64-NEXT:    movl 8(%rdi), %ecx
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpl 8(%rsi), %ecx
 ; X64-NEXT:    je .LBB14_3
-; X64-NEXT:  .LBB14_2: # %res_block
+; X64-NEXT:  .LBB14_1: # %res_block
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:  .LBB14_3: # %endblock
 ; X64-NEXT:    testl %eax, %eax
@@ -505,27 +505,28 @@
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length12:
-; X64:       # BB#0:
+; X64:       # BB#0: # %loadbb
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
 ; X64-NEXT:    bswapq %rdx
 ; X64-NEXT:    cmpq %rdx, %rcx
-; X64-NEXT:    jne .LBB15_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:    jne .LBB15_1
+; X64-NEXT:  # BB#2: # %loadbb1
 ; X64-NEXT:    movl 8(%rdi), %ecx
 ; X64-NEXT:    movl 8(%rsi), %edx
 ; X64-NEXT:    bswapl %ecx
 ; X64-NEXT:    bswapl %edx
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpq %rdx, %rcx
-; X64-NEXT:    je .LBB15_3
-; X64-NEXT:  .LBB15_2: # %res_block
+; X64-NEXT:    jne .LBB15_1
+; X64-NEXT:  # BB#3: # %endblock
+; X64-NEXT:    retq
+; X64-NEXT:  .LBB15_1: # %res_block
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpq %rdx, %rcx
 ; X64-NEXT:    setae %al
 ; X64-NEXT:    leal -1(%rax,%rax), %eax
-; X64-NEXT:  .LBB15_3: # %endblock
 ; X64-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
   ret i32 %m
@@ -545,27 +546,28 @@
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: length16:
-; X64:       # BB#0:
+; X64:       # BB#0: # %loadbb
 ; X64-NEXT:    movq (%rdi), %rcx
 ; X64-NEXT:    movq (%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
 ; X64-NEXT:    bswapq %rdx
 ; X64-NEXT:    cmpq %rdx, %rcx
-; X64-NEXT:    jne .LBB16_2
-; X64-NEXT:  # BB#1: # %loadbb1
+; X64-NEXT:    jne .LBB16_1
+; X64-NEXT:  # BB#2: # %loadbb1
 ; X64-NEXT:    movq 8(%rdi), %rcx
 ; X64-NEXT:    movq 8(%rsi), %rdx
 ; X64-NEXT:    bswapq %rcx
 ; X64-NEXT:    bswapq %rdx
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpq %rdx, %rcx
-; X64-NEXT:    je .LBB16_3
-; X64-NEXT:  .LBB16_2: # %res_block
+; X64-NEXT:    jne .LBB16_1
+; X64-NEXT:  # BB#3: # %endblock
+; X64-NEXT:    retq
+; X64-NEXT:  .LBB16_1: # %res_block
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    cmpq %rdx, %rcx
 ; X64-NEXT:    setae %al
 ; X64-NEXT:    leal -1(%rax,%rax), %eax
-; X64-NEXT:  .LBB16_3: # %endblock
 ; X64-NEXT:    retq
   %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind
   ret i32 %m
@@ -699,19 +701,19 @@
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # BB#0: # %loadbb
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pmovmskb %xmm1, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; X64-SSE2-NEXT:    jne .LBB20_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:    jne .LBB20_1
+; X64-SSE2-NEXT:  # BB#2: # %loadbb1
 ; X64-SSE2-NEXT:    movq 16(%rdi), %rcx
 ; X64-SSE2-NEXT:    xorl %eax, %eax
 ; X64-SSE2-NEXT:    cmpq 16(%rsi), %rcx
 ; X64-SSE2-NEXT:    je .LBB20_3
-; X64-SSE2-NEXT:  .LBB20_2: # %res_block
+; X64-SSE2-NEXT:  .LBB20_1: # %res_block
 ; X64-SSE2-NEXT:    movl $1, %eax
 ; X64-SSE2-NEXT:  .LBB20_3: # %endblock
 ; X64-SSE2-NEXT:    testl %eax, %eax
@@ -719,18 +721,18 @@
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length24_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # BB#0: # %loadbb
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpmovmskb %xmm0, %eax
 ; X64-AVX2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; X64-AVX2-NEXT:    jne .LBB20_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:    jne .LBB20_1
+; X64-AVX2-NEXT:  # BB#2: # %loadbb1
 ; X64-AVX2-NEXT:    movq 16(%rdi), %rcx
 ; X64-AVX2-NEXT:    xorl %eax, %eax
 ; X64-AVX2-NEXT:    cmpq 16(%rsi), %rcx
 ; X64-AVX2-NEXT:    je .LBB20_3
-; X64-AVX2-NEXT:  .LBB20_2: # %res_block
+; X64-AVX2-NEXT:  .LBB20_1: # %res_block
 ; X64-AVX2-NEXT:    movl $1, %eax
 ; X64-AVX2-NEXT:  .LBB20_3: # %endblock
 ; X64-AVX2-NEXT:    testl %eax, %eax
@@ -755,18 +757,18 @@
 ; X86-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length24_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # BB#0: # %loadbb
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; X64-SSE2-NEXT:    jne .LBB21_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:    jne .LBB21_1
+; X64-SSE2-NEXT:  # BB#2: # %loadbb1
 ; X64-SSE2-NEXT:    xorl %eax, %eax
 ; X64-SSE2-NEXT:    movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
 ; X64-SSE2-NEXT:    cmpq %rcx, 16(%rdi)
 ; X64-SSE2-NEXT:    je .LBB21_3
-; X64-SSE2-NEXT:  .LBB21_2: # %res_block
+; X64-SSE2-NEXT:  .LBB21_1: # %res_block
 ; X64-SSE2-NEXT:    movl $1, %eax
 ; X64-SSE2-NEXT:  .LBB21_3: # %endblock
 ; X64-SSE2-NEXT:    testl %eax, %eax
@@ -774,18 +776,18 @@
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length24_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # BB#0: # %loadbb
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %xmm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpmovmskb %xmm0, %eax
 ; X64-AVX2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; X64-AVX2-NEXT:    jne .LBB21_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:    jne .LBB21_1
+; X64-AVX2-NEXT:  # BB#2: # %loadbb1
 ; X64-AVX2-NEXT:    xorl %eax, %eax
 ; X64-AVX2-NEXT:    movabsq $3689065127958034230, %rcx # imm = 0x3332313039383736
 ; X64-AVX2-NEXT:    cmpq %rcx, 16(%rdi)
 ; X64-AVX2-NEXT:    je .LBB21_3
-; X64-AVX2-NEXT:  .LBB21_2: # %res_block
+; X64-AVX2-NEXT:  .LBB21_1: # %res_block
 ; X64-AVX2-NEXT:    movl $1, %eax
 ; X64-AVX2-NEXT:  .LBB21_3: # %endblock
 ; X64-AVX2-NEXT:    testl %eax, %eax
@@ -831,7 +833,7 @@
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length32_eq:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # BB#0: # %loadbb
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE2-NEXT:    movdqu (%ecx), %xmm0
@@ -839,8 +841,8 @@
 ; X86-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
 ; X86-SSE2-NEXT:    pmovmskb %xmm1, %edx
 ; X86-SSE2-NEXT:    cmpl $65535, %edx # imm = 0xFFFF
-; X86-SSE2-NEXT:    jne .LBB23_2
-; X86-SSE2-NEXT:  # BB#1: # %loadbb1
+; X86-SSE2-NEXT:    jne .LBB23_1
+; X86-SSE2-NEXT:  # BB#2: # %loadbb1
 ; X86-SSE2-NEXT:    movdqu 16(%ecx), %xmm0
 ; X86-SSE2-NEXT:    movdqu 16(%eax), %xmm1
 ; X86-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -848,7 +850,7 @@
 ; X86-SSE2-NEXT:    xorl %eax, %eax
 ; X86-SSE2-NEXT:    cmpl $65535, %ecx # imm = 0xFFFF
 ; X86-SSE2-NEXT:    je .LBB23_3
-; X86-SSE2-NEXT:  .LBB23_2: # %res_block
+; X86-SSE2-NEXT:  .LBB23_1: # %res_block
 ; X86-SSE2-NEXT:    xorl %eax, %eax
 ; X86-SSE2-NEXT:    incl %eax
 ; X86-SSE2-NEXT:  .LBB23_3: # %endblock
@@ -857,14 +859,14 @@
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length32_eq:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # BB#0: # %loadbb
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu (%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
 ; X64-SSE2-NEXT:    pmovmskb %xmm1, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; X64-SSE2-NEXT:    jne .LBB23_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:    jne .LBB23_1
+; X64-SSE2-NEXT:  # BB#2: # %loadbb1
 ; X64-SSE2-NEXT:    movdqu 16(%rdi), %xmm0
 ; X64-SSE2-NEXT:    movdqu 16(%rsi), %xmm1
 ; X64-SSE2-NEXT:    pcmpeqb %xmm0, %xmm1
@@ -872,7 +874,7 @@
 ; X64-SSE2-NEXT:    xorl %eax, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %ecx # imm = 0xFFFF
 ; X64-SSE2-NEXT:    je .LBB23_3
-; X64-SSE2-NEXT:  .LBB23_2: # %res_block
+; X64-SSE2-NEXT:  .LBB23_1: # %res_block
 ; X64-SSE2-NEXT:    movl $1, %eax
 ; X64-SSE2-NEXT:  .LBB23_3: # %endblock
 ; X64-SSE2-NEXT:    testl %eax, %eax
@@ -907,21 +909,21 @@
 ; X86-NOSSE-NEXT:    retl
 ;
 ; X86-SSE2-LABEL: length32_eq_const:
-; X86-SSE2:       # BB#0:
+; X86-SSE2:       # BB#0: # %loadbb
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE2-NEXT:    movdqu (%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
 ; X86-SSE2-NEXT:    pmovmskb %xmm0, %ecx
 ; X86-SSE2-NEXT:    cmpl $65535, %ecx # imm = 0xFFFF
-; X86-SSE2-NEXT:    jne .LBB24_2
-; X86-SSE2-NEXT:  # BB#1: # %loadbb1
+; X86-SSE2-NEXT:    jne .LBB24_1
+; X86-SSE2-NEXT:  # BB#2: # %loadbb1
 ; X86-SSE2-NEXT:    movdqu 16(%eax), %xmm0
 ; X86-SSE2-NEXT:    pcmpeqb {{\.LCPI.*}}, %xmm0
 ; X86-SSE2-NEXT:    pmovmskb %xmm0, %ecx
 ; X86-SSE2-NEXT:    xorl %eax, %eax
 ; X86-SSE2-NEXT:    cmpl $65535, %ecx # imm = 0xFFFF
 ; X86-SSE2-NEXT:    je .LBB24_3
-; X86-SSE2-NEXT:  .LBB24_2: # %res_block
+; X86-SSE2-NEXT:  .LBB24_1: # %res_block
 ; X86-SSE2-NEXT:    xorl %eax, %eax
 ; X86-SSE2-NEXT:    incl %eax
 ; X86-SSE2-NEXT:  .LBB24_3: # %endblock
@@ -930,20 +932,20 @@
 ; X86-SSE2-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: length32_eq_const:
-; X64-SSE2:       # BB#0:
+; X64-SSE2:       # BB#0: # %loadbb
 ; X64-SSE2-NEXT:    movdqu (%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
-; X64-SSE2-NEXT:    jne .LBB24_2
-; X64-SSE2-NEXT:  # BB#1: # %loadbb1
+; X64-SSE2-NEXT:    jne .LBB24_1
+; X64-SSE2-NEXT:  # BB#2: # %loadbb1
 ; X64-SSE2-NEXT:    movdqu 16(%rdi), %xmm0
 ; X64-SSE2-NEXT:    pcmpeqb {{.*}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pmovmskb %xmm0, %ecx
 ; X64-SSE2-NEXT:    xorl %eax, %eax
 ; X64-SSE2-NEXT:    cmpl $65535, %ecx # imm = 0xFFFF
 ; X64-SSE2-NEXT:    je .LBB24_3
-; X64-SSE2-NEXT:  .LBB24_2: # %res_block
+; X64-SSE2-NEXT:  .LBB24_1: # %res_block
 ; X64-SSE2-NEXT:    movl $1, %eax
 ; X64-SSE2-NEXT:  .LBB24_3: # %endblock
 ; X64-SSE2-NEXT:    testl %eax, %eax
@@ -1007,20 +1009,20 @@
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length64_eq:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # BB#0: # %loadbb
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb (%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; X64-AVX2-NEXT:    cmpl $-1, %eax
-; X64-AVX2-NEXT:    jne .LBB26_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:    jne .LBB26_1
+; X64-AVX2-NEXT:  # BB#2: # %loadbb1
 ; X64-AVX2-NEXT:    vmovdqu 32(%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb 32(%rsi), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %ecx
 ; X64-AVX2-NEXT:    xorl %eax, %eax
 ; X64-AVX2-NEXT:    cmpl $-1, %ecx
 ; X64-AVX2-NEXT:    je .LBB26_3
-; X64-AVX2-NEXT:  .LBB26_2: # %res_block
+; X64-AVX2-NEXT:  .LBB26_1: # %res_block
 ; X64-AVX2-NEXT:    movl $1, %eax
 ; X64-AVX2-NEXT:  .LBB26_3: # %endblock
 ; X64-AVX2-NEXT:    testl %eax, %eax
@@ -1057,20 +1059,20 @@
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX2-LABEL: length64_eq_const:
-; X64-AVX2:       # BB#0:
+; X64-AVX2:       # BB#0: # %loadbb
 ; X64-AVX2-NEXT:    vmovdqu (%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %eax
 ; X64-AVX2-NEXT:    cmpl $-1, %eax
-; X64-AVX2-NEXT:    jne .LBB27_2
-; X64-AVX2-NEXT:  # BB#1: # %loadbb1
+; X64-AVX2-NEXT:    jne .LBB27_1
+; X64-AVX2-NEXT:  # BB#2: # %loadbb1
 ; X64-AVX2-NEXT:    vmovdqu 32(%rdi), %ymm0
 ; X64-AVX2-NEXT:    vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
 ; X64-AVX2-NEXT:    vpmovmskb %ymm0, %ecx
 ; X64-AVX2-NEXT:    xorl %eax, %eax
 ; X64-AVX2-NEXT:    cmpl $-1, %ecx
 ; X64-AVX2-NEXT:    je .LBB27_3
-; X64-AVX2-NEXT:  .LBB27_2: # %res_block
+; X64-AVX2-NEXT:  .LBB27_1: # %res_block
 ; X64-AVX2-NEXT:    movl $1, %eax
 ; X64-AVX2-NEXT:  .LBB27_3: # %endblock
 ; X64-AVX2-NEXT:    testl %eax, %eax