Optimize loading (zextload) of an i1 value from memory.

This patch is a partial revert of https://llvm.org/svn/llvm-project/llvm/trunk@237793. The extra "and" instruction introduced there causes a performance degradation.

We assume that an i1 is stored in memory in zero-extended form; the store operation is responsible for zeroing the upper bits.
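
As a minimal sketch of the invariant (hypothetical @store_side/@load_side
functions, written in the same typed-pointer IR as the tests below): the
"and $1" belongs on the truncate/store side, so the zextload side can rely
on the upper bits already being zero.

  define void @store_side(i8 %v, i1* %p) {
    ; the trunc+store side is responsible for zeroing the upper
    ; bits; this is where codegen emits the "andl $1"
    %b = trunc i8 %v to i1
    store i1 %b, i1* %p
    ret void
  }

  define i32 @load_side(i1* %p) {
    ; the zextload can omit the extra "and", because the i1 in
    ; memory is assumed to already be in zero-extended form
    %a = load i1, i1* %p
    %b = zext i1 %a to i32
    ret i32 %b
  }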

Differential Revision: http://reviews.llvm.org/D17541

llvm-svn: 261828
diff --git a/llvm/test/CodeGen/X86/and-encoding.ll b/llvm/test/CodeGen/X86/and-encoding.ll
index f7bbac2..1a90bd0 100644
--- a/llvm/test/CodeGen/X86/and-encoding.ll
+++ b/llvm/test/CodeGen/X86/and-encoding.ll
@@ -15,27 +15,18 @@
   ret void
 }
 
-define void @f2(i1 *%x, i16 *%y) {
+define void @f2(i16 %x, i1 *%y) {
 ; CHECK-LABEL: f2:
-; CHECK: andl	$1, %eax                # encoding: [0x83,0xe0,0x01]
-  %a = load i1, i1* %x
-  %b = zext i1 %a to i16
-  store i16 %b, i16* %y
+; CHECK: andl	$1, %edi                # encoding: [0x83,0xe7,0x01]
+  %c = trunc i16 %x to i1
+  store i1 %c, i1* %y
   ret void
 }
 
-define i32 @f3(i1 *%x) {
+define void @f3(i32 %x, i1 *%y) {
 ; CHECK-LABEL: f3:
-; CHECK: andl	$1, %eax                # encoding: [0x83,0xe0,0x01]
-  %a = load i1, i1* %x
-  %b = zext i1 %a to i32
-  ret i32 %b
-}
-
-define i64 @f4(i1 *%x) {
-; CHECK-LABEL: f4:
-; CHECK: andl	$1, %eax                # encoding: [0x83,0xe0,0x01]
-  %a = load i1, i1* %x
-  %b = zext i1 %a to i64
-  ret i64 %b
+; CHECK: andl	$1, %edi                # encoding: [0x83,0xe7,0x01]
+  %c = trunc i32 %x to i1
+  store i1 %c, i1* %y
+  ret void
 }