[RISCV] Support Bit-Preserving FP in F/D Extensions
Summary:
This allows some integer bitwise operations on floating-point bit
patterns to instead be performed by hardware fp instructions. This is
correct because the RISC-V spec requires the F and D extensions to use
the IEEE-754 standard representation, and fp register loads and stores
to be bit-preserving.
This is tested against the soft-float ABI, but with hardware float
extensions enabled, so that the tests also ensure the optimisation
fires in this case.
Reviewers: asb, luismarques
Reviewed By: asb
Subscribers: hiraditya, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, kito-cheng, shiva0217, jrtc27, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, rkruppe, PkmX, jocewei, psnobl, benna, Jim, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D62900
llvm-svn: 362790
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 377933e..2da0e31 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -302,6 +302,14 @@
return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}
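+// Return true if it is safe to transform an integer-domain bitwise operation
+// into the equivalent floating-point operation (e.g. a sign-bit xor into
+// fneg). This holds whenever the FP type has hardware support.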
+bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
+ return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
+ (VT == MVT::f64 && Subtarget.hasStdExtD());
+}
+
// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index a7f397c..8e756a1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -72,6 +72,8 @@
bool isZExtFree(SDValue Val, EVT VT2) const override;
bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
+ bool hasBitPreservingFPLogic(EVT VT) const override;
+
// Provide custom lowering hooks for some operations.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
diff --git a/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
new file mode 100644
index 0000000..d9054fc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/float-bit-preserving-dagcombines.ll
@@ -0,0 +1,395 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32F %s
+; RUN: llc -mtriple=riscv32 -mattr=+f,+d -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32FD %s
+; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64F %s
+; RUN: llc -mtriple=riscv64 -mattr=+f,+d -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64FD %s
+
+; These functions perform extra work to ensure that `%a3` starts in a
+; floating-point register (if the machine has them), and that the
+; result of the bitwise operation is then needed in a floating-point
+; register. This should mean the optimisations fire even when using the
+; soft-float ABI on a machine with hardware floating-point support.
+
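+; The expected folds (matching the CHECK lines below) are:
+;   and with 0x7fff...f (clear the sign bit) -> fabs
+;   xor with 0x8000...0 (flip the sign bit)  -> fneg
+;   or  with 0x8000...0 (set the sign bit)   -> fneg(fabs)
+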
+define float @bitcast_and(float %a1, float %a2) nounwind {
+; RV32F-LABEL: bitcast_and:
+; RV32F: # %bb.0:
+; RV32F-NEXT: fmv.w.x ft0, a1
+; RV32F-NEXT: fmv.w.x ft1, a0
+; RV32F-NEXT: fadd.s ft0, ft1, ft0
+; RV32F-NEXT: fabs.s ft0, ft0
+; RV32F-NEXT: fadd.s ft0, ft1, ft0
+; RV32F-NEXT: fmv.x.w a0, ft0
+; RV32F-NEXT: ret
+;
+; RV32FD-LABEL: bitcast_and:
+; RV32FD: # %bb.0:
+; RV32FD-NEXT: fmv.w.x ft0, a1
+; RV32FD-NEXT: fmv.w.x ft1, a0
+; RV32FD-NEXT: fadd.s ft0, ft1, ft0
+; RV32FD-NEXT: fabs.s ft0, ft0
+; RV32FD-NEXT: fadd.s ft0, ft1, ft0
+; RV32FD-NEXT: fmv.x.w a0, ft0
+; RV32FD-NEXT: ret
+;
+; RV64F-LABEL: bitcast_and:
+; RV64F: # %bb.0:
+; RV64F-NEXT: fmv.w.x ft0, a1
+; RV64F-NEXT: fmv.w.x ft1, a0
+; RV64F-NEXT: fadd.s ft0, ft1, ft0
+; RV64F-NEXT: fabs.s ft0, ft0
+; RV64F-NEXT: fadd.s ft0, ft1, ft0
+; RV64F-NEXT: fmv.x.w a0, ft0
+; RV64F-NEXT: ret
+;
+; RV64FD-LABEL: bitcast_and:
+; RV64FD: # %bb.0:
+; RV64FD-NEXT: fmv.w.x ft0, a1
+; RV64FD-NEXT: fmv.w.x ft1, a0
+; RV64FD-NEXT: fadd.s ft0, ft1, ft0
+; RV64FD-NEXT: fabs.s ft0, ft0
+; RV64FD-NEXT: fadd.s ft0, ft1, ft0
+; RV64FD-NEXT: fmv.x.w a0, ft0
+; RV64FD-NEXT: ret
+ %a3 = fadd float %a1, %a2
+ %bc1 = bitcast float %a3 to i32
+ %and = and i32 %bc1, 2147483647
+ %bc2 = bitcast i32 %and to float
+ %a4 = fadd float %a1, %bc2
+ ret float %a4
+}
+
+define double @bitcast_double_and(double %a1, double %a2) nounwind {
+; RV32F-LABEL: bitcast_double_and:
+; RV32F: # %bb.0:
+; RV32F-NEXT: addi sp, sp, -16
+; RV32F-NEXT: sw ra, 12(sp)
+; RV32F-NEXT: sw s0, 8(sp)
+; RV32F-NEXT: sw s1, 4(sp)
+; RV32F-NEXT: mv s0, a1
+; RV32F-NEXT: mv s1, a0
+; RV32F-NEXT: call __adddf3
+; RV32F-NEXT: mv a2, a0
+; RV32F-NEXT: lui a0, 524288
+; RV32F-NEXT: addi a0, a0, -1
+; RV32F-NEXT: and a3, a1, a0
+; RV32F-NEXT: mv a0, s1
+; RV32F-NEXT: mv a1, s0
+; RV32F-NEXT: call __adddf3
+; RV32F-NEXT: lw s1, 4(sp)
+; RV32F-NEXT: lw s0, 8(sp)
+; RV32F-NEXT: lw ra, 12(sp)
+; RV32F-NEXT: addi sp, sp, 16
+; RV32F-NEXT: ret
+;
+; RV32FD-LABEL: bitcast_double_and:
+; RV32FD: # %bb.0:
+; RV32FD-NEXT: addi sp, sp, -16
+; RV32FD-NEXT: sw a2, 8(sp)
+; RV32FD-NEXT: sw a3, 12(sp)
+; RV32FD-NEXT: fld ft0, 8(sp)
+; RV32FD-NEXT: sw a0, 8(sp)
+; RV32FD-NEXT: sw a1, 12(sp)
+; RV32FD-NEXT: fld ft1, 8(sp)
+; RV32FD-NEXT: fadd.d ft0, ft1, ft0
+; RV32FD-NEXT: fabs.d ft0, ft0
+; RV32FD-NEXT: fadd.d ft0, ft1, ft0
+; RV32FD-NEXT: fsd ft0, 8(sp)
+; RV32FD-NEXT: lw a0, 8(sp)
+; RV32FD-NEXT: lw a1, 12(sp)
+; RV32FD-NEXT: addi sp, sp, 16
+; RV32FD-NEXT: ret
+;
+; RV64F-LABEL: bitcast_double_and:
+; RV64F: # %bb.0:
+; RV64F-NEXT: addi sp, sp, -16
+; RV64F-NEXT: sd ra, 8(sp)
+; RV64F-NEXT: sd s0, 0(sp)
+; RV64F-NEXT: mv s0, a0
+; RV64F-NEXT: call __adddf3
+; RV64F-NEXT: addi a1, zero, -1
+; RV64F-NEXT: slli a1, a1, 63
+; RV64F-NEXT: addi a1, a1, -1
+; RV64F-NEXT: and a1, a0, a1
+; RV64F-NEXT: mv a0, s0
+; RV64F-NEXT: call __adddf3
+; RV64F-NEXT: ld s0, 0(sp)
+; RV64F-NEXT: ld ra, 8(sp)
+; RV64F-NEXT: addi sp, sp, 16
+; RV64F-NEXT: ret
+;
+; RV64FD-LABEL: bitcast_double_and:
+; RV64FD: # %bb.0:
+; RV64FD-NEXT: fmv.d.x ft0, a1
+; RV64FD-NEXT: fmv.d.x ft1, a0
+; RV64FD-NEXT: fadd.d ft0, ft1, ft0
+; RV64FD-NEXT: fabs.d ft0, ft0
+; RV64FD-NEXT: fadd.d ft0, ft1, ft0
+; RV64FD-NEXT: fmv.x.d a0, ft0
+; RV64FD-NEXT: ret
+ %a3 = fadd double %a1, %a2
+ %bc1 = bitcast double %a3 to i64
+ %and = and i64 %bc1, 9223372036854775807
+ %bc2 = bitcast i64 %and to double
+ %a4 = fadd double %a1, %bc2
+ ret double %a4
+}
+
+
+define float @bitcast_xor(float %a1, float %a2) nounwind {
+; RV32F-LABEL: bitcast_xor:
+; RV32F: # %bb.0:
+; RV32F-NEXT: fmv.w.x ft0, a1
+; RV32F-NEXT: fmv.w.x ft1, a0
+; RV32F-NEXT: fmul.s ft0, ft1, ft0
+; RV32F-NEXT: fneg.s ft0, ft0
+; RV32F-NEXT: fmul.s ft0, ft1, ft0
+; RV32F-NEXT: fmv.x.w a0, ft0
+; RV32F-NEXT: ret
+;
+; RV32FD-LABEL: bitcast_xor:
+; RV32FD: # %bb.0:
+; RV32FD-NEXT: fmv.w.x ft0, a1
+; RV32FD-NEXT: fmv.w.x ft1, a0
+; RV32FD-NEXT: fmul.s ft0, ft1, ft0
+; RV32FD-NEXT: fneg.s ft0, ft0
+; RV32FD-NEXT: fmul.s ft0, ft1, ft0
+; RV32FD-NEXT: fmv.x.w a0, ft0
+; RV32FD-NEXT: ret
+;
+; RV64F-LABEL: bitcast_xor:
+; RV64F: # %bb.0:
+; RV64F-NEXT: fmv.w.x ft0, a1
+; RV64F-NEXT: fmv.w.x ft1, a0
+; RV64F-NEXT: fmul.s ft0, ft1, ft0
+; RV64F-NEXT: fneg.s ft0, ft0
+; RV64F-NEXT: fmul.s ft0, ft1, ft0
+; RV64F-NEXT: fmv.x.w a0, ft0
+; RV64F-NEXT: ret
+;
+; RV64FD-LABEL: bitcast_xor:
+; RV64FD: # %bb.0:
+; RV64FD-NEXT: fmv.w.x ft0, a1
+; RV64FD-NEXT: fmv.w.x ft1, a0
+; RV64FD-NEXT: fmul.s ft0, ft1, ft0
+; RV64FD-NEXT: fneg.s ft0, ft0
+; RV64FD-NEXT: fmul.s ft0, ft1, ft0
+; RV64FD-NEXT: fmv.x.w a0, ft0
+; RV64FD-NEXT: ret
+ %a3 = fmul float %a1, %a2
+ %bc1 = bitcast float %a3 to i32
+ %xor = xor i32 %bc1, 2147483648
+ %bc2 = bitcast i32 %xor to float
+ %a4 = fmul float %a1, %bc2
+ ret float %a4
+}
+
+define double @bitcast_double_xor(double %a1, double %a2) nounwind {
+; RV32F-LABEL: bitcast_double_xor:
+; RV32F: # %bb.0:
+; RV32F-NEXT: addi sp, sp, -16
+; RV32F-NEXT: sw ra, 12(sp)
+; RV32F-NEXT: sw s0, 8(sp)
+; RV32F-NEXT: sw s1, 4(sp)
+; RV32F-NEXT: mv s0, a1
+; RV32F-NEXT: mv s1, a0
+; RV32F-NEXT: call __muldf3
+; RV32F-NEXT: mv a2, a0
+; RV32F-NEXT: lui a0, 524288
+; RV32F-NEXT: xor a3, a1, a0
+; RV32F-NEXT: mv a0, s1
+; RV32F-NEXT: mv a1, s0
+; RV32F-NEXT: call __muldf3
+; RV32F-NEXT: lw s1, 4(sp)
+; RV32F-NEXT: lw s0, 8(sp)
+; RV32F-NEXT: lw ra, 12(sp)
+; RV32F-NEXT: addi sp, sp, 16
+; RV32F-NEXT: ret
+;
+; RV32FD-LABEL: bitcast_double_xor:
+; RV32FD: # %bb.0:
+; RV32FD-NEXT: addi sp, sp, -16
+; RV32FD-NEXT: sw a2, 8(sp)
+; RV32FD-NEXT: sw a3, 12(sp)
+; RV32FD-NEXT: fld ft0, 8(sp)
+; RV32FD-NEXT: sw a0, 8(sp)
+; RV32FD-NEXT: sw a1, 12(sp)
+; RV32FD-NEXT: fld ft1, 8(sp)
+; RV32FD-NEXT: fmul.d ft0, ft1, ft0
+; RV32FD-NEXT: fneg.d ft0, ft0
+; RV32FD-NEXT: fmul.d ft0, ft1, ft0
+; RV32FD-NEXT: fsd ft0, 8(sp)
+; RV32FD-NEXT: lw a0, 8(sp)
+; RV32FD-NEXT: lw a1, 12(sp)
+; RV32FD-NEXT: addi sp, sp, 16
+; RV32FD-NEXT: ret
+;
+; RV64F-LABEL: bitcast_double_xor:
+; RV64F: # %bb.0:
+; RV64F-NEXT: addi sp, sp, -16
+; RV64F-NEXT: sd ra, 8(sp)
+; RV64F-NEXT: sd s0, 0(sp)
+; RV64F-NEXT: mv s0, a0
+; RV64F-NEXT: call __muldf3
+; RV64F-NEXT: addi a1, zero, -1
+; RV64F-NEXT: slli a1, a1, 63
+; RV64F-NEXT: xor a1, a0, a1
+; RV64F-NEXT: mv a0, s0
+; RV64F-NEXT: call __muldf3
+; RV64F-NEXT: ld s0, 0(sp)
+; RV64F-NEXT: ld ra, 8(sp)
+; RV64F-NEXT: addi sp, sp, 16
+; RV64F-NEXT: ret
+;
+; RV64FD-LABEL: bitcast_double_xor:
+; RV64FD: # %bb.0:
+; RV64FD-NEXT: fmv.d.x ft0, a1
+; RV64FD-NEXT: fmv.d.x ft1, a0
+; RV64FD-NEXT: fmul.d ft0, ft1, ft0
+; RV64FD-NEXT: fneg.d ft0, ft0
+; RV64FD-NEXT: fmul.d ft0, ft1, ft0
+; RV64FD-NEXT: fmv.x.d a0, ft0
+; RV64FD-NEXT: ret
+ %a3 = fmul double %a1, %a2
+ %bc1 = bitcast double %a3 to i64
+ %xor = xor i64 %bc1, 9223372036854775808
+ %bc2 = bitcast i64 %xor to double
+ %a4 = fmul double %a1, %bc2
+ ret double %a4
+}
+
+define float @bitcast_or(float %a1, float %a2) nounwind {
+; RV32F-LABEL: bitcast_or:
+; RV32F: # %bb.0:
+; RV32F-NEXT: fmv.w.x ft0, a1
+; RV32F-NEXT: fmv.w.x ft1, a0
+; RV32F-NEXT: fmul.s ft0, ft1, ft0
+; RV32F-NEXT: fabs.s ft0, ft0
+; RV32F-NEXT: fneg.s ft0, ft0
+; RV32F-NEXT: fmul.s ft0, ft1, ft0
+; RV32F-NEXT: fmv.x.w a0, ft0
+; RV32F-NEXT: ret
+;
+; RV32FD-LABEL: bitcast_or:
+; RV32FD: # %bb.0:
+; RV32FD-NEXT: fmv.w.x ft0, a1
+; RV32FD-NEXT: fmv.w.x ft1, a0
+; RV32FD-NEXT: fmul.s ft0, ft1, ft0
+; RV32FD-NEXT: fabs.s ft0, ft0
+; RV32FD-NEXT: fneg.s ft0, ft0
+; RV32FD-NEXT: fmul.s ft0, ft1, ft0
+; RV32FD-NEXT: fmv.x.w a0, ft0
+; RV32FD-NEXT: ret
+;
+; RV64F-LABEL: bitcast_or:
+; RV64F: # %bb.0:
+; RV64F-NEXT: fmv.w.x ft0, a1
+; RV64F-NEXT: fmv.w.x ft1, a0
+; RV64F-NEXT: fmul.s ft0, ft1, ft0
+; RV64F-NEXT: fabs.s ft0, ft0
+; RV64F-NEXT: fneg.s ft0, ft0
+; RV64F-NEXT: fmul.s ft0, ft1, ft0
+; RV64F-NEXT: fmv.x.w a0, ft0
+; RV64F-NEXT: ret
+;
+; RV64FD-LABEL: bitcast_or:
+; RV64FD: # %bb.0:
+; RV64FD-NEXT: fmv.w.x ft0, a1
+; RV64FD-NEXT: fmv.w.x ft1, a0
+; RV64FD-NEXT: fmul.s ft0, ft1, ft0
+; RV64FD-NEXT: fabs.s ft0, ft0
+; RV64FD-NEXT: fneg.s ft0, ft0
+; RV64FD-NEXT: fmul.s ft0, ft1, ft0
+; RV64FD-NEXT: fmv.x.w a0, ft0
+; RV64FD-NEXT: ret
+ %a3 = fmul float %a1, %a2
+ %bc1 = bitcast float %a3 to i32
+ %or = or i32 %bc1, 2147483648
+ %bc2 = bitcast i32 %or to float
+ %a4 = fmul float %a1, %bc2
+ ret float %a4
+}
+
+define double @bitcast_double_or(double %a1, double %a2) nounwind {
+; RV32F-LABEL: bitcast_double_or:
+; RV32F: # %bb.0:
+; RV32F-NEXT: addi sp, sp, -16
+; RV32F-NEXT: sw ra, 12(sp)
+; RV32F-NEXT: sw s0, 8(sp)
+; RV32F-NEXT: sw s1, 4(sp)
+; RV32F-NEXT: mv s0, a1
+; RV32F-NEXT: mv s1, a0
+; RV32F-NEXT: call __muldf3
+; RV32F-NEXT: mv a2, a0
+; RV32F-NEXT: lui a0, 524288
+; RV32F-NEXT: or a3, a1, a0
+; RV32F-NEXT: mv a0, s1
+; RV32F-NEXT: mv a1, s0
+; RV32F-NEXT: call __muldf3
+; RV32F-NEXT: lw s1, 4(sp)
+; RV32F-NEXT: lw s0, 8(sp)
+; RV32F-NEXT: lw ra, 12(sp)
+; RV32F-NEXT: addi sp, sp, 16
+; RV32F-NEXT: ret
+;
+; RV32FD-LABEL: bitcast_double_or:
+; RV32FD: # %bb.0:
+; RV32FD-NEXT: addi sp, sp, -16
+; RV32FD-NEXT: sw a2, 8(sp)
+; RV32FD-NEXT: sw a3, 12(sp)
+; RV32FD-NEXT: fld ft0, 8(sp)
+; RV32FD-NEXT: sw a0, 8(sp)
+; RV32FD-NEXT: sw a1, 12(sp)
+; RV32FD-NEXT: fld ft1, 8(sp)
+; RV32FD-NEXT: fmul.d ft0, ft1, ft0
+; RV32FD-NEXT: fabs.d ft0, ft0
+; RV32FD-NEXT: fneg.d ft0, ft0
+; RV32FD-NEXT: fmul.d ft0, ft1, ft0
+; RV32FD-NEXT: fsd ft0, 8(sp)
+; RV32FD-NEXT: lw a0, 8(sp)
+; RV32FD-NEXT: lw a1, 12(sp)
+; RV32FD-NEXT: addi sp, sp, 16
+; RV32FD-NEXT: ret
+;
+; RV64F-LABEL: bitcast_double_or:
+; RV64F: # %bb.0:
+; RV64F-NEXT: addi sp, sp, -16
+; RV64F-NEXT: sd ra, 8(sp)
+; RV64F-NEXT: sd s0, 0(sp)
+; RV64F-NEXT: mv s0, a0
+; RV64F-NEXT: call __muldf3
+; RV64F-NEXT: addi a1, zero, -1
+; RV64F-NEXT: slli a1, a1, 63
+; RV64F-NEXT: or a1, a0, a1
+; RV64F-NEXT: mv a0, s0
+; RV64F-NEXT: call __muldf3
+; RV64F-NEXT: ld s0, 0(sp)
+; RV64F-NEXT: ld ra, 8(sp)
+; RV64F-NEXT: addi sp, sp, 16
+; RV64F-NEXT: ret
+;
+; RV64FD-LABEL: bitcast_double_or:
+; RV64FD: # %bb.0:
+; RV64FD-NEXT: fmv.d.x ft0, a1
+; RV64FD-NEXT: fmv.d.x ft1, a0
+; RV64FD-NEXT: fmul.d ft0, ft1, ft0
+; RV64FD-NEXT: fabs.d ft0, ft0
+; RV64FD-NEXT: fneg.d ft0, ft0
+; RV64FD-NEXT: fmul.d ft0, ft1, ft0
+; RV64FD-NEXT: fmv.x.d a0, ft0
+; RV64FD-NEXT: ret
+ %a3 = fmul double %a1, %a2
+ %bc1 = bitcast double %a3 to i64
+ %or = or i64 %bc1, 9223372036854775808
+ %bc2 = bitcast i64 %or to double
+ %a4 = fmul double %a1, %bc2
+ ret double %a4
+}