[RISCV] Implement RV64D codegen

This patch:
* Adds the necessary RV64D codegen patterns
* Modifies CC_RISCV so that it properly handles f64 types (with the soft-float ABI), as illustrated in the sketch below
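
For instance (a minimal sketch, not part of this patch's tests), an f64 argument that arrives in a GPR under the RV64 soft-float ABI can be moved directly into an FPR:

  ; Hypothetical example: on riscv64 with +d, %a and %b arrive in a0 and a1
  ; and are moved to FPRs with fmv.d.x (as in the updated checks below); on
  ; riscv32 each double occupies a GPR pair and is reassembled via the stack.
  define double @fadd_d(double %a, double %b) nounwind {
    %1 = fadd double %a, %b
    ret double %1
  }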

Note that, in general, there is no reason to select fcvt.w[u].d rather than fcvt.l[u].d for i32 conversions: fptosi/fptoui produce poison if the input does not fit in the target type, so the wider conversion is always a valid lowering.
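
For example, an i32 conversion such as the following (a minimal sketch, not taken from this patch's tests) can be lowered on RV64 with fcvt.l.d, since any input out of range for i32 is poison anyway:

  ; Hypothetical function; on riscv64 with +d this may select fcvt.l.d
  ; (the i64 conversion) even though the IR result type is i32.
  define i32 @fcvt_w_d(double %a) nounwind {
    %1 = fptosi double %a to i32
    ret i32 %1
  }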

Differential Revision: https://reviews.llvm.org/D53237

llvm-svn: 352833
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index adcd804..25c8f6d 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -1,12 +1,19 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
 ; RUN:   | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64IFD %s
 
 define i32 @fcmp_false(double %a, double %b) nounwind {
 ; RV32IFD-LABEL: fcmp_false:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    mv a0, zero
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_false:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    mv a0, zero
+; RV64IFD-NEXT:    ret
   %1 = fcmp false double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -25,6 +32,13 @@
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_oeq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oeq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -43,6 +57,13 @@
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ogt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ogt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -61,6 +82,13 @@
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_oge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp oge double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -79,6 +107,13 @@
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_olt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp olt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -97,6 +132,13 @@
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ole:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    ret
   %1 = fcmp ole double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -122,6 +164,20 @@
 ; RV32IFD-NEXT:    and a0, a1, a0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_one:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    feq.d a0, ft1, ft1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft1
+; RV64IFD-NEXT:    not a1, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    and a0, a1, a0
+; RV64IFD-NEXT:    ret
   %1 = fcmp one double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -144,6 +200,17 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ord:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ord double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -167,6 +234,18 @@
 ; RV32IFD-NEXT:    or a0, a0, a1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ueq:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    feq.d a2, ft1, ft1
+; RV64IFD-NEXT:    and a1, a2, a1
+; RV64IFD-NEXT:    seqz a1, a1
+; RV64IFD-NEXT:    or a0, a0, a1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ueq double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -186,6 +265,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ugt:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ugt double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -205,6 +292,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_uge:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp uge double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -224,6 +319,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ult:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    fle.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ult double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -243,6 +346,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_ule:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    fmv.d.x ft1, a1
+; RV64IFD-NEXT:    flt.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp ule double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -262,6 +373,14 @@
 ; RV32IFD-NEXT:    xori a0, a0, 1
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_une:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    fmv.d.x ft1, a0
+; RV64IFD-NEXT:    feq.d a0, ft1, ft0
+; RV64IFD-NEXT:    xori a0, a0, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp une double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -283,6 +402,16 @@
 ; RV32IFD-NEXT:    seqz a0, a0
 ; RV32IFD-NEXT:    addi sp, sp, 16
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_uno:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    fmv.d.x ft0, a1
+; RV64IFD-NEXT:    feq.d a1, ft0, ft0
+; RV64IFD-NEXT:    fmv.d.x ft0, a0
+; RV64IFD-NEXT:    feq.d a0, ft0, ft0
+; RV64IFD-NEXT:    and a0, a0, a1
+; RV64IFD-NEXT:    seqz a0, a0
+; RV64IFD-NEXT:    ret
   %1 = fcmp uno double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2
@@ -293,6 +422,11 @@
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, zero, 1
 ; RV32IFD-NEXT:    ret
+;
+; RV64IFD-LABEL: fcmp_true:
+; RV64IFD:       # %bb.0:
+; RV64IFD-NEXT:    addi a0, zero, 1
+; RV64IFD-NEXT:    ret
   %1 = fcmp true double %a, %b
   %2 = zext i1 %1 to i32
   ret i32 %2