Split the Add, Sub, and Mul instruction opcodes into separate
integer and floating-point opcodes, introducing
FAdd, FSub, and FMul.
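
As a rough illustration (not taken from this patch), textual IR now
spells out the floating-point forms by opcode name, while integer
arithmetic keeps the existing opcodes:

    define double @example(double %a, double %b, i32 %i, i32 %j) {
    entry:
      %isum = add i32 %i, %j          ; integer addition keeps 'add'
      %fsum = fadd double %a, %b      ; floating-point addition uses 'fadd'
      %fdiff = fsub double %fsum, %a  ; 'fsub' replaces 'sub' on FP types
      %fprod = fmul double %fdiff, %b ; 'fmul' replaces 'mul' on FP types
      ret double %fprod
    }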

For now, the AsmParser, BitcodeReader, and IRBuilder all preserve
backwards compatibility, and the Core LLVM APIs preserve backwards
compatibility for IR producers. Most front-ends won't need to change
immediately.
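
For example, old-style IR that uses 'add' on floating-point operands,
like the hypothetical snippet below, should still be accepted by the
AsmParser and mapped onto the new opcodes:

    ; old-style IR: 'add' on a floating-point type
    define double @old_style(double %a, double %b) {
    entry:
      %sum = add double %a, %b        ; still parses; treated as fadd
      ret double %sum
    }

Likewise, IR producers built on IRBuilder can keep calling the existing
Create* methods on floating-point operands for now and migrate to the
FAdd/FSub/FMul counterparts at their leisure.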

This implements the first step of the plan outlined here:
http://nondot.org/sabre/LLVMNotes/IntegerOverflow.txt


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72897 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll b/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll
index a58cd16..7a65c00 100644
--- a/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll
+++ b/test/CodeGen/PowerPC/2006-10-11-combiner-aa-regression.ll
@@ -9,15 +9,15 @@
 entry:
         %tmp = getelementptr %struct.Point* %pt, i32 0, i32 0           ; <double*> [#uses=2]
         %tmp.upgrd.1 = load double* %tmp                ; <double> [#uses=1]
-        %tmp2 = add double %tmp.upgrd.1, %x             ; <double> [#uses=1]
+        %tmp2 = fadd double %tmp.upgrd.1, %x             ; <double> [#uses=1]
         store double %tmp2, double* %tmp
         %tmp6 = getelementptr %struct.Point* %pt, i32 0, i32 1          ; <double*> [#uses=2]
         %tmp7 = load double* %tmp6              ; <double> [#uses=1]
-        %tmp9 = add double %tmp7, %y            ; <double> [#uses=1]
+        %tmp9 = fadd double %tmp7, %y            ; <double> [#uses=1]
         store double %tmp9, double* %tmp6
         %tmp13 = getelementptr %struct.Point* %pt, i32 0, i32 2         ; <double*> [#uses=2]
         %tmp14 = load double* %tmp13            ; <double> [#uses=1]
-        %tmp16 = add double %tmp14, %z          ; <double> [#uses=1]
+        %tmp16 = fadd double %tmp14, %z          ; <double> [#uses=1]
         store double %tmp16, double* %tmp13
         ret void
 }