Split the Add, Sub, and Mul instruction opcodes into separate
integer and floating-point opcodes, introducing
FAdd, FSub, and FMul.
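
As a minimal sketch of what the split means at the IR level (the
function and value names here are hypothetical), integer and
floating-point arithmetic now use distinct opcodes:

    define float @example(i32 %i, float %f) {
      %sum.int = add i32 %i, %i      ; integer addition keeps the add opcode
      %sum.fp  = fadd float %f, %f   ; floating-point addition now uses fadd
      ret float %sum.fp
    }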

For now, the AsmParser, BitcodeReader, and IRBuilder all preserve
backwards compatibility, and the Core LLVM APIs preserve backwards
compatibility for IR producers. Most front-ends won't need to change
immediately.
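
For illustration of that compatibility (a hypothetical snippet), IR
that still spells floating-point addition with the old opcode should
continue to parse for now, alongside the new form:

    %old = add float %a, %a      ; legacy spelling, still accepted for now
    %new = fadd float %a, %a     ; new canonical opcode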

This implements the first step of the plan outlined here:
http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72897 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/ARM/vfp.ll b/test/CodeGen/ARM/vfp.ll
index 2acb33f..f58da44 100644
--- a/test/CodeGen/ARM/vfp.ll
+++ b/test/CodeGen/ARM/vfp.ll
@@ -39,10 +39,10 @@
 
 define void @test_add(float* %P, double* %D) {
 	%a = load float* %P		; <float> [#uses=2]
-	%b = add float %a, %a		; <float> [#uses=1]
+	%b = fadd float %a, %a		; <float> [#uses=1]
 	store float %b, float* %P
 	%A = load double* %D		; <double> [#uses=2]
-	%B = add double %A, %A		; <double> [#uses=1]
+	%B = fadd double %A, %A		; <double> [#uses=1]
 	store double %B, double* %D
 	ret void
 }
@@ -61,8 +61,8 @@
 	%a1 = load float* %P1		; <float> [#uses=1]
 	%a2 = load float* %P2		; <float> [#uses=1]
 	%a3 = load float* %P3		; <float> [#uses=1]
-	%X = mul float %a1, %a2		; <float> [#uses=1]
-	%Y = sub float %X, %a3		; <float> [#uses=1]
+	%X = fmul float %a1, %a2		; <float> [#uses=1]
+	%Y = fsub float %X, %a3		; <float> [#uses=1]
 	store float %Y, float* %P1
 	ret void
 }