[X86][SSE] Dropped -mcpu from bitcast+setcc mask tests

Use the target triple and feature attributes only, for consistency with other tests.
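
Dropping -mcpu scopes each RUN line to the explicit -mattr features alone,
instead of also picking up the generic x86-64 CPU model's scheduling and
tuning settings. A representative RUN line change, taken from the diff below:

  old: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=AVX12,AVX2
  new: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2

Switching the triple from x86_64-apple-darwin to x86_64-unknown-unknown also
changes the assembly comment prefix (## -> #) and the private label prefix
(Lcfi -> .Lcfi), which accounts for the bulk of the regenerated check lines.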

llvm-svn: 307176
diff --git a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
index a6d6ca1..9552968 100644
--- a/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
+++ b/llvm/test/CodeGen/X86/bitcast-and-setcc-256.ll
@@ -1,13 +1,13 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+SSE2 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+SSSE3 < %s | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx < %s | FileCheck %s --check-prefixes=AVX12,AVX1
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefixes=AVX12,AVX2
-; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=x86-64 -mattr=+avx512f,+avx512vl,+avx512bw < %s | FileCheck %s --check-prefix=AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+SSE2 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+SSSE3 | FileCheck %s --check-prefixes=SSE2-SSSE3,SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX12,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX12,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s --check-prefix=AVX512
 
 define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
 ; SSE2-SSSE3-LABEL: v4i64:
-; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3:       # BB#0:
 ; SSE2-SSSE3-NEXT:    movdqa {{.*#+}} xmm8 = [2147483648,0,2147483648,0]
 ; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm3
 ; SSE2-SSSE3-NEXT:    pxor %xmm8, %xmm1
@@ -57,11 +57,11 @@
 ; SSE2-SSSE3-NEXT:    psrad $31, %xmm2
 ; SSE2-SSSE3-NEXT:    pand %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT:    movmskps %xmm2, %eax
-; SSE2-SSSE3-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: v4i64:
-; AVX1:       ## BB#0:
+; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpcmpgtq %xmm4, %xmm5, %xmm4
@@ -74,12 +74,12 @@
 ; AVX1-NEXT:    vpacksswb %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vmovmskps %xmm0, %eax
-; AVX1-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: v4i64:
-; AVX2:       ## BB#0:
+; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpcmpgtq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -88,12 +88,12 @@
 ; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovmskps %xmm0, %eax
-; AVX2-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: v4i64:
-; AVX512:       ## BB#0:
+; AVX512:       # BB#0:
 ; AVX512-NEXT:    vpcmpgtq %ymm1, %ymm0, %k1
 ; AVX512-NEXT:    vpcmpgtq %ymm3, %ymm2, %k0 {%k1}
 ; AVX512-NEXT:    kmovd %k0, %eax
@@ -110,7 +110,7 @@
 
 define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double> %d) {
 ; SSE2-SSSE3-LABEL: v4f64:
-; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3:       # BB#0:
 ; SSE2-SSSE3-NEXT:    cmpltpd %xmm1, %xmm3
 ; SSE2-SSSE3-NEXT:    cmpltpd %xmm0, %xmm2
 ; SSE2-SSSE3-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
@@ -123,11 +123,11 @@
 ; SSE2-SSSE3-NEXT:    psrad $31, %xmm6
 ; SSE2-SSSE3-NEXT:    pand %xmm2, %xmm6
 ; SSE2-SSSE3-NEXT:    movmskps %xmm6, %eax
-; SSE2-SSSE3-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-SSSE3-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-SSSE3-NEXT:    retq
 ;
 ; AVX12-LABEL: v4f64:
-; AVX12:       ## BB#0:
+; AVX12:       # BB#0:
 ; AVX12-NEXT:    vcmpltpd %ymm0, %ymm1, %ymm0
 ; AVX12-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX12-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -136,12 +136,12 @@
 ; AVX12-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT:    vmovmskps %xmm0, %eax
-; AVX12-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX12-NEXT:    vzeroupper
 ; AVX12-NEXT:    retq
 ;
 ; AVX512-LABEL: v4f64:
-; AVX512:       ## BB#0:
+; AVX512:       # BB#0:
 ; AVX512-NEXT:    vcmpltpd %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vcmpltpd %ymm2, %ymm3, %k0 {%k1}
 ; AVX512-NEXT:    kmovd %k0, %eax
@@ -158,7 +158,7 @@
 
 define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
 ; SSE2-LABEL: v16i16:
-; SSE2:       ## BB#0:
+; SSE2:       # BB#0:
 ; SSE2-NEXT:    pcmpgtw %xmm3, %xmm1
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
 ; SSE2-NEXT:    pand %xmm3, %xmm1
@@ -181,11 +181,11 @@
 ; SSE2-NEXT:    pcmpgtb %xmm4, %xmm2
 ; SSE2-NEXT:    pand %xmm1, %xmm2
 ; SSE2-NEXT:    pmovmskb %xmm2, %eax
-; SSE2-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SSE2-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: v16i16:
-; SSSE3:       ## BB#0:
+; SSSE3:       # BB#0:
 ; SSSE3-NEXT:    pcmpgtw %xmm3, %xmm1
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; SSSE3-NEXT:    pshufb %xmm3, %xmm1
@@ -208,11 +208,11 @@
 ; SSSE3-NEXT:    pcmpgtb %xmm4, %xmm2
 ; SSSE3-NEXT:    pand %xmm1, %xmm2
 ; SSSE3-NEXT:    pmovmskb %xmm2, %eax
-; SSSE3-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; SSSE3-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: v16i16:
-; AVX1:       ## BB#0:
+; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpcmpgtw %xmm4, %xmm5, %xmm4
@@ -225,12 +225,12 @@
 ; AVX1-NEXT:    vpacksswb %xmm1, %xmm2, %xmm1
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
-; AVX1-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX1-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: v16i16:
-; AVX2:       ## BB#0:
+; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpcmpgtw %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -239,16 +239,16 @@
 ; AVX2-NEXT:    vpacksswb %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovmskb %xmm0, %eax
-; AVX2-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX2-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: v16i16:
-; AVX512:       ## BB#0:
+; AVX512:       # BB#0:
 ; AVX512-NEXT:    vpcmpgtw %ymm1, %ymm0, %k1
 ; AVX512-NEXT:    vpcmpgtw %ymm3, %ymm2, %k0 {%k1}
 ; AVX512-NEXT:    kmovd %k0, %eax
-; AVX512-NEXT:    ## kill: %AX<def> %AX<kill> %EAX<kill>
+; AVX512-NEXT:    # kill: %AX<def> %AX<kill> %EAX<kill>
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %x0 = icmp sgt <16 x i16> %a, %b
@@ -260,7 +260,7 @@
 
 define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
 ; SSE2-LABEL: v8i32:
-; SSE2:       ## BB#0:
+; SSE2:       # BB#0:
 ; SSE2-NEXT:    pcmpgtd %xmm3, %xmm1
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -287,11 +287,11 @@
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
 ; SSE2-NEXT:    packuswb %xmm2, %xmm2
 ; SSE2-NEXT:    pmovmskb %xmm2, %eax
-; SSE2-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: v8i32:
-; SSSE3:       ## BB#0:
+; SSSE3:       # BB#0:
 ; SSSE3-NEXT:    pcmpgtd %xmm3, %xmm1
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSSE3-NEXT:    pshufb %xmm3, %xmm1
@@ -310,11 +310,11 @@
 ; SSSE3-NEXT:    pand %xmm0, %xmm4
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm4 = xmm4[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSSE3-NEXT:    pmovmskb %xmm4, %eax
-; SSSE3-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: v8i32:
-; AVX1:       ## BB#0:
+; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm4
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm5
 ; AVX1-NEXT:    vpcmpgtd %xmm4, %xmm5, %xmm4
@@ -328,12 +328,12 @@
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX1-NEXT:    vpmovmskb %xmm0, %eax
-; AVX1-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX1-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: v8i32:
-; AVX2:       ## BB#0:
+; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -343,16 +343,16 @@
 ; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX2-NEXT:    vpmovmskb %xmm0, %eax
-; AVX2-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX2-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: v8i32:
-; AVX512:       ## BB#0:
+; AVX512:       # BB#0:
 ; AVX512-NEXT:    vpcmpgtd %ymm1, %ymm0, %k1
 ; AVX512-NEXT:    vpcmpgtd %ymm3, %ymm2, %k0 {%k1}
 ; AVX512-NEXT:    kmovd %k0, %eax
-; AVX512-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %x0 = icmp sgt <8 x i32> %a, %b
@@ -364,7 +364,7 @@
 
 define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d) {
 ; SSE2-LABEL: v8f32:
-; SSE2:       ## BB#0:
+; SSE2:       # BB#0:
 ; SSE2-NEXT:    cmpltps %xmm1, %xmm3
 ; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm3[0,2,2,3,4,5,6,7]
 ; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -391,11 +391,11 @@
 ; SSE2-NEXT:    pand {{.*}}(%rip), %xmm2
 ; SSE2-NEXT:    packuswb %xmm2, %xmm2
 ; SSE2-NEXT:    pmovmskb %xmm2, %eax
-; SSE2-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSE2-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSE2-NEXT:    retq
 ;
 ; SSSE3-LABEL: v8f32:
-; SSSE3:       ## BB#0:
+; SSSE3:       # BB#0:
 ; SSSE3-NEXT:    cmpltps %xmm1, %xmm3
 ; SSSE3-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; SSSE3-NEXT:    pshufb %xmm1, %xmm3
@@ -414,11 +414,11 @@
 ; SSSE3-NEXT:    pand %xmm2, %xmm6
 ; SSSE3-NEXT:    pshufb {{.*#+}} xmm6 = xmm6[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSSE3-NEXT:    pmovmskb %xmm6, %eax
-; SSSE3-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; SSSE3-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; SSSE3-NEXT:    retq
 ;
 ; AVX12-LABEL: v8f32:
-; AVX12:       ## BB#0:
+; AVX12:       # BB#0:
 ; AVX12-NEXT:    vcmpltps %ymm0, %ymm1, %ymm0
 ; AVX12-NEXT:    vextractf128 $1, %ymm0, %xmm1
 ; AVX12-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
@@ -428,16 +428,16 @@
 ; AVX12-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX12-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX12-NEXT:    vpmovmskb %xmm0, %eax
-; AVX12-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX12-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX12-NEXT:    vzeroupper
 ; AVX12-NEXT:    retq
 ;
 ; AVX512-LABEL: v8f32:
-; AVX512:       ## BB#0:
+; AVX512:       # BB#0:
 ; AVX512-NEXT:    vcmpltps %ymm0, %ymm1, %k1
 ; AVX512-NEXT:    vcmpltps %ymm2, %ymm3, %k0 {%k1}
 ; AVX512-NEXT:    kmovd %k0, %eax
-; AVX512-NEXT:    ## kill: %AL<def> %AL<kill> %EAX<kill>
+; AVX512-NEXT:    # kill: %AL<def> %AL<kill> %EAX<kill>
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
   %x0 = fcmp ogt <8 x float> %a, %b
@@ -449,7 +449,7 @@
 
 define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
 ; SSE2-SSSE3-LABEL: v32i8:
-; SSE2-SSSE3:       ## BB#0:
+; SSE2-SSSE3:       # BB#0:
 ; SSE2-SSSE3-NEXT:    pcmpgtb %xmm2, %xmm0
 ; SSE2-SSSE3-NEXT:    pcmpgtb %xmm3, %xmm1
 ; SSE2-SSSE3-NEXT:    pcmpgtb %xmm6, %xmm4
@@ -561,14 +561,14 @@
 ; SSE2-SSSE3-NEXT:    retq
 ;
 ; AVX1-LABEL: v32i8:
-; AVX1:       ## BB#0:
+; AVX1:       # BB#0:
 ; AVX1-NEXT:    pushq %rbp
-; AVX1-NEXT:  Lcfi0:
+; AVX1-NEXT:  .Lcfi0:
 ; AVX1-NEXT:    .cfi_def_cfa_offset 16
-; AVX1-NEXT:  Lcfi1:
+; AVX1-NEXT:  .Lcfi1:
 ; AVX1-NEXT:    .cfi_offset %rbp, -16
 ; AVX1-NEXT:    movq %rsp, %rbp
-; AVX1-NEXT:  Lcfi2:
+; AVX1-NEXT:  .Lcfi2:
 ; AVX1-NEXT:    .cfi_def_cfa_register %rbp
 ; AVX1-NEXT:    andq $-32, %rsp
 ; AVX1-NEXT:    subq $32, %rsp
@@ -687,7 +687,7 @@
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: v32i8:
-; AVX2:       ## BB#0:
+; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpcmpgtb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpcmpgtb %ymm3, %ymm2, %ymm1
 ; AVX2-NEXT:    vpand %ymm1, %ymm0, %ymm0
@@ -696,7 +696,7 @@
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: v32i8:
-; AVX512:       ## BB#0:
+; AVX512:       # BB#0:
 ; AVX512-NEXT:    vpcmpgtb %ymm1, %ymm0, %k1
 ; AVX512-NEXT:    vpcmpgtb %ymm3, %ymm2, %k0 {%k1}
 ; AVX512-NEXT:    kmovd %k0, %eax