Make basic block label matching more flexible so the test passes on buildbots whose assembly comment marker differs from "##"

llvm-svn: 229535
diff --git a/llvm/test/CodeGen/X86/logical-load-fold.ll b/llvm/test/CodeGen/X86/logical-load-fold.ll
index 6051a5e..fa6b9e5 100644
--- a/llvm/test/CodeGen/X86/logical-load-fold.ll
+++ b/llvm/test/CodeGen/X86/logical-load-fold.ll
@@ -11,14 +11,14 @@
 
 define double @load_double_no_fold(double %x, double %y) {
 ; SSE2-LABEL: load_double_no_fold:
-; SSE2:       ## BB#0:
+; SSE2:       BB#0:
 ; SSE2-NEXT:    cmplesd %xmm0, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; SSE2-NEXT:    andpd %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: load_double_no_fold:
-; AVX:       ## BB#0:
+; AVX:       BB#0:
 ; AVX-NEXT:    vcmplesd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm0
@@ -32,14 +32,14 @@
 
 define float @load_float_no_fold(float %x, float %y) {
 ; SSE2-LABEL: load_float_no_fold:
-; SSE2:       ## BB#0:
+; SSE2:       BB#0:
 ; SSE2-NEXT:    cmpless %xmm0, %xmm1
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT:    andps %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
 ; AVX-LABEL: load_float_no_fold:
-; AVX:       ## BB#0:
+; AVX:       BB#0:
 ; AVX-NEXT:    vcmpless %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0