[x86] add common check prefix to reduce duplication; NFC
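
FileCheck accepts --check-prefix more than once, so lines tagged ALL are
verified by both RUN lines. Where the -bmi and +bmi codegen is identical
(as in shrink_xor_constant2 below), the checks can be written once under
ALL instead of being duplicated under NO_BMI and BMI.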

llvm-svn: 288522
diff --git a/llvm/test/CodeGen/X86/not-and-simplify.ll b/llvm/test/CodeGen/X86/not-and-simplify.ll
index 534e035..3bee447 100644
--- a/llvm/test/CodeGen/X86/not-and-simplify.ll
+++ b/llvm/test/CodeGen/X86/not-and-simplify.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-bmi | FileCheck %s --check-prefix=NO_BMI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefix=BMI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-bmi | FileCheck %s --check-prefix=ALL --check-prefix=NO_BMI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+bmi | FileCheck %s --check-prefix=ALL --check-prefix=BMI
 
 ; Clear high bits via shift, set them with xor (not), then mask them off.
 
@@ -29,21 +29,13 @@
 ; Clear low bits via shift, set them with xor (not), then mask them off.
 
 define i8 @shrink_xor_constant2(i8 %x) {
-; NO_BMI-LABEL: shrink_xor_constant2:
-; NO_BMI:       # BB#0:
-; NO_BMI-NEXT:    shlb $5, %dil
-; NO_BMI-NEXT:    notb %dil
-; NO_BMI-NEXT:    andb $-32, %dil
-; NO_BMI-NEXT:    movl %edi, %eax
-; NO_BMI-NEXT:    retq
-;
-; BMI-LABEL: shrink_xor_constant2:
-; BMI:       # BB#0:
-; BMI-NEXT:    shlb $5, %dil
-; BMI-NEXT:    notb %dil
-; BMI-NEXT:    andb $-32, %dil
-; BMI-NEXT:    movl %edi, %eax
-; BMI-NEXT:    retq
+; ALL-LABEL: shrink_xor_constant2:
+; ALL:       # BB#0:
+; ALL-NEXT:    shlb $5, %dil
+; ALL-NEXT:    notb %dil
+; ALL-NEXT:    andb $-32, %dil
+; ALL-NEXT:    movl %edi, %eax
+; ALL-NEXT:    retq
 ;
   %sh = shl i8 %x, 5
   %not = xor i8 %sh, -1