Split the Add, Sub, and Mul instruction opcodes into separate
integer and floating-point opcodes, introducing
FAdd, FSub, and FMul.
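
For example, floating-point arithmetic now gets its own opcode while
integer arithmetic keeps the old spelling (a minimal illustration, not
part of this patch):

    %si = add i32 %a, %b        ; integer add, spelling unchanged
    %sf = fadd double %x, %y    ; floating-point add, new opcode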

For now, the AsmParser, BitcodeReader, and IRBuilder all preserve
backwards compatibility, and the Core LLVM APIs preserve backwards
compatibility for IR producers. Most front-ends won't need to change
immediately.
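
As a sketch of what that compatibility means in practice (assuming, per
the note above, that the textual parser upgrades the old spelling in
place rather than rejecting it):

    ; Existing .ll files that still spell floating-point arithmetic
    ; with the old opcodes continue to parse; they are read as the
    ; corresponding new F* instructions.
    %r = add double %x, %y      ; accepted for now, treated as fadd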

This implements the first step of the plan outlined here:
http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt

llvm-svn: 72897
diff --git a/llvm/test/CodeGen/X86/fp_load_fold.ll b/llvm/test/CodeGen/X86/fp_load_fold.ll
index 7c33cb3..655ad3d 100644
--- a/llvm/test/CodeGen/X86/fp_load_fold.ll
+++ b/llvm/test/CodeGen/X86/fp_load_fold.ll
@@ -5,25 +5,25 @@
 
 define double @test_add(double %X, double* %P) {
 	%Y = load double* %P		; <double> [#uses=1]
-	%R = add double %X, %Y		; <double> [#uses=1]
+	%R = fadd double %X, %Y		; <double> [#uses=1]
 	ret double %R
 }
 
 define double @test_mul(double %X, double* %P) {
 	%Y = load double* %P		; <double> [#uses=1]
-	%R = mul double %X, %Y		; <double> [#uses=1]
+	%R = fmul double %X, %Y		; <double> [#uses=1]
 	ret double %R
 }
 
 define double @test_sub(double %X, double* %P) {
 	%Y = load double* %P		; <double> [#uses=1]
-	%R = sub double %X, %Y		; <double> [#uses=1]
+	%R = fsub double %X, %Y		; <double> [#uses=1]
 	ret double %R
 }
 
 define double @test_subr(double %X, double* %P) {
 	%Y = load double* %P		; <double> [#uses=1]
-	%R = sub double %Y, %X		; <double> [#uses=1]
+	%R = fsub double %Y, %X		; <double> [#uses=1]
 	ret double %R
 }