This patch provides clean intrinsic function overloading support in LLVM. It cleans up the intrinsic definitions and generally smooths the process of writing more complicated intrinsics. It will be used by the upcoming atomic intrinsics, as well as by future vector and floating-point intrinsics.
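
As an illustrative sketch (using the ctlz family that appears in the updated tests below), an overloaded intrinsic is defined once and then instantiated at whatever integer width a use site needs, with the concrete type mangled into the name:

    declare i8  @llvm.ctlz.i8(i8)
    declare i64 @llvm.ctlz.i64(i64)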

This also changes the syntax of the llvm.bswap, llvm.part.set, llvm.part.select, and llvm.ct* intrinsics. Calls using the old forms are automatically upgraded by both the LLVM assembly reader and the bitcode reader. The test cases have been updated, and dedicated tests have been added to verify that the automatic upgrading works.
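
For example (forms taken from the updated tests below), the counting intrinsics now return the same integer type as their operand rather than a fixed i32, and part.select drops its redundant second type suffix. IR written against the old declarations, such as:

    declare i32 @llvm.ctlz.i64(i64)
    declare i32 @llvm.part.select.i32.i32(i32, i32, i32)

is accepted by the readers and upgraded to the new forms:

    declare i64 @llvm.ctlz.i64(i64)
    declare i32 @llvm.part.select.i32(i32, i32, i32)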


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40807 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/Alpha/ctlz.ll b/test/CodeGen/Alpha/ctlz.ll
index 0ad014d..fba6022 100644
--- a/test/CodeGen/Alpha/ctlz.ll
+++ b/test/CodeGen/Alpha/ctlz.ll
@@ -5,10 +5,11 @@
 ; RUN: llvm-as < %s | llc -march=alpha -mcpu=ev56 | not grep -i ctlz
 ; RUN: llvm-as < %s | llc -march=alpha -mattr=-CIX | not grep -i ctlz
 
-declare i32 @llvm.ctlz.i8(i8)
+declare i8 @llvm.ctlz.i8(i8)
 
 define i32 @bar(i8 %x) {
 entry:
-	%tmp.1 = call i32 @llvm.ctlz.i8( i8 %x ) 
-	ret i32 %tmp.1
+	%tmp.1 = call i8 @llvm.ctlz.i8( i8 %x ) 
+	%tmp.2 = sext i8 %tmp.1 to i32
+	ret i32 %tmp.2
 }
diff --git a/test/CodeGen/Generic/bit-intrinsics.ll b/test/CodeGen/Generic/bit-intrinsics.ll
index 427387f..9541011 100644
--- a/test/CodeGen/Generic/bit-intrinsics.ll
+++ b/test/CodeGen/Generic/bit-intrinsics.ll
@@ -3,21 +3,21 @@
 ; RUN: llvm-as < %s > %t.bc
 ; RUN: lli --force-interpreter=true %t.bc
 
-declare i32 @llvm.part.set.i32.i32.i32(i32 %x, i32 %rep, i32 %hi, i32 %lo)
-declare i16 @llvm.part.set.i16.i16.i16(i16 %x, i16 %rep, i32 %hi, i32 %lo)
+declare i32 @llvm.part.set.i32.i32(i32 %x, i32 %rep, i32 %hi, i32 %lo)
+declare i16 @llvm.part.set.i16.i16(i16 %x, i16 %rep, i32 %hi, i32 %lo)
 define i32 @test_part_set(i32 %A, i16 %B) {
-  %a = call i32 @llvm.part.set.i32.i32.i32(i32 %A, i32 27, i32 8, i32 0)
-  %b = call i16 @llvm.part.set.i16.i16.i16(i16 %B, i16 27, i32 8, i32 0)
+  %a = call i32 @llvm.part.set.i32.i32(i32 %A, i32 27, i32 8, i32 0)
+  %b = call i16 @llvm.part.set.i16.i16(i16 %B, i16 27, i32 8, i32 0)
   %c = zext i16 %b to i32
   %d = add i32 %a, %c
   ret i32 %d
 }
 
-declare i32 @llvm.part.select.i32.i32(i32 %x, i32 %hi, i32 %lo)
-declare i16 @llvm.part.select.i16.i16(i16 %x, i32 %hi, i32 %lo)
+declare i32 @llvm.part.select.i32(i32 %x, i32 %hi, i32 %lo)
+declare i16 @llvm.part.select.i16(i16 %x, i32 %hi, i32 %lo)
 define i32 @test_part_select(i32 %A, i16 %B) {
-  %a = call i32 @llvm.part.select.i32.i32(i32 %A, i32 8, i32 0)
-  %b = call i16 @llvm.part.select.i16.i16(i16 %B, i32 8, i32 0)
+  %a = call i32 @llvm.part.select.i32(i32 %A, i32 8, i32 0)
+  %b = call i16 @llvm.part.select.i16(i16 %B, i32 8, i32 0)
   %c = zext i16 %b to i32
   %d = add i32 %a, %c
   ret i32 %d
diff --git a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
index 1ea6174..098e748 100644
--- a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
+++ b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
@@ -2,10 +2,11 @@
 
 define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) {
         %tmp19 = load i64* %t
-        %tmp23 = tail call i32 @llvm.ctlz.i64( i64 %tmp19 )             ; <i64> [#uses=1]
+        %tmp22 = tail call i64 @llvm.ctlz.i64( i64 %tmp19 )             ; <i64> [#uses=1]
+        %tmp23 = trunc i64 %tmp22 to i32
         %tmp89 = add i32 %tmp23, -64          ; <i32> [#uses=1]
         %tmp90 = add i32 %tmp89, 0            ; <i32> [#uses=1]
         ret i32 %tmp90
 }
 
-declare i32 @llvm.ctlz.i64(i64)
+declare i64 @llvm.ctlz.i64(i64)