Rewrite fast-isel integer cast handling to cover more cases, and to be simpler and more consistent.
The practical effects here are that x86-64 fast-isel can now handle trunc from i8 to i1, and ARM fast-isel can handle many more constructs involving integers narrower than 32 bits (including loads, stores, and many integer casts).
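As an illustrative sketch (these functions are not part of this commit's tests, and the names are made up), x86-64 fast-isel can now select an i8-to-i1 trunc like the one in the first function below, and ARM fast-isel can now handle sub-32-bit constructs like the i16 load and zext in the second, rather than falling back to SelectionDAG:

  define i1 @trunc8to1(i8 %x) nounwind {
    %b = trunc i8 %x to i1
    ret i1 %b
  }

  define i32 @load16(i16* %p) nounwind {
    %v = load i16* %p
    %e = zext i16 %v to i32
    ret i32 %e
  }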
rdar://9437928
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@132099 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/fast-isel-i1.ll b/test/CodeGen/X86/fast-isel-i1.ll
index 5d572c1..8e01968 100644
--- a/test/CodeGen/X86/fast-isel-i1.ll
+++ b/test/CodeGen/X86/fast-isel-i1.ll
@@ -1,14 +1,15 @@
-; RUN: llc < %s -march=x86 -fast-isel | FileCheck %s
+; RUN: llc < %s -march=x86 -fast-isel -fast-isel-abort | FileCheck %s
+; RUN: llc < %s -march=x86-64 -fast-isel -fast-isel-abort | FileCheck %s
 
-declare i64 @test1a(i64)
+declare i32 @test1a(i32)
 
-define i32 @test1(i64 %x) nounwind {
+define i32 @test1(i32 %x) nounwind {
; CHECK: test1:
; CHECK: andb $1, %
- %y = add i64 %x, -3
- %t = call i64 @test1a(i64 %y)
- %s = mul i64 %t, 77
- %z = trunc i64 %s to i1
+ %y = add i32 %x, -3
+ %t = call i32 @test1a(i32 %y)
+ %s = mul i32 %t, 77
+ %z = trunc i32 %s to i1
   br label %next
 
 next: ; preds = %0