Split the Add, Sub, and Mul instruction opcodes into separate
integer and floating-point opcodes, introducing
FAdd, FSub, and FMul.
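
As a minimal sketch of the change in textual IR (the value names here
are hypothetical, for illustration only):

    %isum = add i32 %x, %y        ; integer add: opcode unchanged
    %fsum = fadd double %a, %b    ; FP add: new dedicated opcode
    %fdif = fsub double %a, %b    ; FP sub
    %fprd = fmul double %a, %b    ; FP mul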

For now, the AsmParser, BitcodeReader, and IRBuilder all preserve
backwards compatibility, and the Core LLVM APIs preserve backwards
compatibility for IR producers. Most front-ends won't need to change
immediately.
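
Concretely, for the AsmParser that compatibility means old-style
textual IR like the pre-change lines in the test diff below still
parses; a sketch with hypothetical values:

    ; The old spelling of a floating-point multiply...
    %prod = mul double %a, %b
    ; ...is still accepted, and is handled as the new opcode:
    %prod = fmul double %a, %b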

This implements the first step of the plan outlined here:
http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72897 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll b/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
index 4db5773..071c788 100644
--- a/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
+++ b/test/CodeGen/PowerPC/2008-10-28-f128-i32.ll
@@ -6,17 +6,17 @@
 	br i1 %0, label %bb5, label %bb1
 
 bb1:		; preds = %entry
-	%1 = mul ppc_fp128 %a, 0xM3DF00000000000000000000000000000		; <ppc_fp128> [#uses=1]
+	%1 = fmul ppc_fp128 %a, 0xM3DF00000000000000000000000000000		; <ppc_fp128> [#uses=1]
 	%2 = fptoui ppc_fp128 %1 to i32		; <i32> [#uses=1]
 	%3 = zext i32 %2 to i64		; <i64> [#uses=1]
 	%4 = shl i64 %3, 32		; <i64> [#uses=3]
 	%5 = uitofp i64 %4 to ppc_fp128		; <ppc_fp128> [#uses=1]
-	%6 = sub ppc_fp128 %a, %5		; <ppc_fp128> [#uses=3]
+	%6 = fsub ppc_fp128 %a, %5		; <ppc_fp128> [#uses=3]
 	%7 = fcmp olt ppc_fp128 %6, 0xM00000000000000000000000000000000		; <i1> [#uses=1]
 	br i1 %7, label %bb2, label %bb3
 
 bb2:		; preds = %bb1
-	%8 = sub ppc_fp128 0xM80000000000000000000000000000000, %6		; <ppc_fp128> [#uses=1]
+	%8 = fsub ppc_fp128 0xM80000000000000000000000000000000, %6		; <ppc_fp128> [#uses=1]
 	%9 = fptoui ppc_fp128 %8 to i32		; <i32> [#uses=1]
 	%10 = zext i32 %9 to i64		; <i64> [#uses=1]
 	%11 = sub i64 %4, %10		; <i64> [#uses=1]