ARM & AArch64 NEON: share the vabs implementation.
This changes ARM to use @llvm.fabs for floating-point vabs. Patterns
for it already existed in the backend, and the generic intrinsic may
help mid-end passes since it is more likely to be understood than the
target-specific @llvm.arm.neon.vabs.
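
For illustration only (not part of the patch): a minimal sketch of the
intended effect on user code, assuming <arm_neon.h> and a NEON-enabled
clang. The IR in the comments reflects the lowering this change aims
for, not guaranteed compiler output.

    #include <arm_neon.h>

    float32x4_t abs_f32(float32x4_t v) {
      // Floating-point vabs now lowers to the generic intrinsic, e.g.
      //   call <4 x float> @llvm.fabs.v4f32(<4 x float> %v)
      // rather than the target-specific @llvm.arm.neon.vabs.
      return vabsq_f32(v);
    }

    int32x4_t abs_s32(int32x4_t v) {
      // Integer vabs still goes through @llvm.arm.neon.vabs, now
      // emitted from a single code path shared by ARM and AArch64.
      return vabsq_s32(v);
    }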
llvm-svn: 201313
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 740a869..e25a1aa 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1777,6 +1777,12 @@
unsigned Int;
switch (BuiltinID) {
default: break;
+ case NEON::BI__builtin_neon_vabs_v:
+ case NEON::BI__builtin_neon_vabsq_v:
+ if (VTy->getElementType()->isFloatingPointTy())
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty), Ops,
+ "vabs");
case NEON::BI__builtin_neon_vaeseq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aese),
Ops, "aese");
@@ -3979,13 +3985,6 @@
Int = Intrinsic::aarch64_neon_vmulx;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
}
- case NEON::BI__builtin_neon_vabs_v:
- case NEON::BI__builtin_neon_vabsq_v: {
- if (VTy->getElementType()->isFloatingPointTy()) {
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
- }
- return EmitARMBuiltinExpr(NEON::BI__builtin_neon_vabs_v, E);
- }
case NEON::BI__builtin_neon_vsqadd_v:
case NEON::BI__builtin_neon_vsqaddq_v: {
Int = Intrinsic::aarch64_neon_usqadd;
@@ -4511,10 +4510,6 @@
unsigned Int;
switch (BuiltinID) {
default: return 0;
- case NEON::BI__builtin_neon_vabs_v:
- case NEON::BI__builtin_neon_vabsq_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
- Ops, "vabs");
case NEON::BI__builtin_neon_vld1q_lane_v:
// Handle 64-bit integer elements as a special case. Use shuffles of
// one-element vectors to avoid poor code for i64 in the backend.