[X86][FastISel] Use a COPY from a K register to a GPR instead of a K operation
The KORTEST was introduced due to a bug where a TEST instruction used a K register,
but it turns out that the opposite case is now happening: KORTEST is being used on a
value that lives in a GPR. This change removes the KORTEST flow and instead adds a
COPY instruction from the K register to a GPR, followed by a TEST on that GPR.
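For illustration, the updated avx512-fsel.ll test shows the emitted sequence changing from

  kortestw %k1, %k1

to

  kmovw %k1, %ecx
  testb $1, %cl

i.e. the mask is copied out of the K register and only its low bit is tested.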
Differential Revision: https://reviews.llvm.org/D24953
llvm-svn: 282580
diff --git a/llvm/test/CodeGen/X86/avx512-fsel.ll b/llvm/test/CodeGen/X86/avx512-fsel.ll
index 2c9c42b8..0afaeae 100644
--- a/llvm/test/CodeGen/X86/avx512-fsel.ll
+++ b/llvm/test/CodeGen/X86/avx512-fsel.ll
@@ -26,7 +26,8 @@
; CHECK-NEXT: movb %dil, %r8b
; CHECK-NEXT: andl $1, %r8d
; CHECK-NEXT: kmovw %r8d, %k1
-; CHECK-NEXT: kortestw %k1, %k1
+; CHECK-NEXT: kmovw %k1, %ecx
+; CHECK-NEXT: testb $1, %cl
; CHECK-NEXT: movb %al, {{[0-9]+}}(%rsp) ## 1-byte Spill
; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
; CHECK-NEXT: jne LBB0_1
diff --git a/llvm/test/CodeGen/X86/fast-isel-load-i1.ll b/llvm/test/CodeGen/X86/fast-isel-load-i1.ll
new file mode 100644
index 0000000..1b2e3c5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/fast-isel-load-i1.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s
+
+define i1 @test_i1(i1* %b) {
+; CHECK-LABEL: test_i1:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: testb $1, (%rdi)
+entry:
+ %0 = load i1, i1* %b, align 1
+ br i1 %0, label %in, label %out
+in:
+ ret i1 0
+out:
+ ret i1 1
+}
+
diff --git a/llvm/test/CodeGen/X86/fast-isel-select-cmov.ll b/llvm/test/CodeGen/X86/fast-isel-select-cmov.ll
index 290bcaa..a9b2dd8 100644
--- a/llvm/test/CodeGen/X86/fast-isel-select-cmov.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-select-cmov.ll
@@ -16,7 +16,8 @@
; AVX512-LABEL: select_cmov_i16:
; AVX512: ## BB#0:
; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kortestw %k0, %k0
+; AVX512-NEXT: kmovw %k0, %eax
+; AVX512-NEXT: testb $1, %al
; AVX512-NEXT: cmovew %dx, %si
; AVX512-NEXT: movzwl %si, %eax
; AVX512-NEXT: retq
@@ -47,7 +48,8 @@
; AVX512-LABEL: select_cmov_i32:
; AVX512: ## BB#0:
; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kortestw %k0, %k0
+; AVX512-NEXT: kmovw %k0, %eax
+; AVX512-NEXT: testb $1, %al
; AVX512-NEXT: cmovel %edx, %esi
; AVX512-NEXT: movl %esi, %eax
; AVX512-NEXT: retq
@@ -78,7 +80,8 @@
; AVX512-LABEL: select_cmov_i64:
; AVX512: ## BB#0:
; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kortestw %k0, %k0
+; AVX512-NEXT: kmovw %k0, %eax
+; AVX512-NEXT: testb $1, %al
; AVX512-NEXT: cmoveq %rdx, %rsi
; AVX512-NEXT: movq %rsi, %rax
; AVX512-NEXT: retq