[X86][F16C] Remove cvtph2ps intrinsics and use generic half2float conversion (PR37554)
This removes everything except int_x86_avx512_mask_vcvtph2ps_512, which provides the SAE variant; even that can be lowered to the generic fpext when the rounding control is the default (4, _MM_FROUND_CUR_DIRECTION).
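For example, a plain _mm_cvtph_ps(a) now compiles to a shufflevector +
bitcast + fpext sequence (IR value names below are illustrative, taken
from the updated tests):

  %v = bitcast <2 x i64> %a to <8 x i16>
  %s = shufflevector <8 x i16> %v, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %h = bitcast <4 x i16> %s to <4 x half>
  %r = fpext <4 x half> %h to <4 x float>

instead of:

  %r = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %v)

The masked variants additionally emit a select between the conversion
result and the passthrough operand, preserving the old intrinsics'
writemask semantics.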
Differential Revision: https://reviews.llvm.org/D75162
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 47b3abd..ba3b14c 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -10327,6 +10327,46 @@
   return EmitX86CpuIs(CPUStr);
 }
 
+// Convert F16 halves to floats.
+static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
+                                       ArrayRef<Value *> Ops,
+                                       llvm::Type *DstTy) {
+  assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
+         "Unknown cvtph2ps intrinsic");
+
+  // If the SAE intrinsic doesn't use default rounding (4, CUR_DIRECTION), we
+  // can't use the generic fpext and must keep the target-specific intrinsic.
+  if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
+    Function *F =
+        CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
+    return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
+  }
+
+  unsigned NumDstElts = DstTy->getVectorNumElements();
+  Value *Src = Ops[0];
+
+  // Extract the subvector of halves actually converted (128-bit case only).
+  if (NumDstElts != Src->getType()->getVectorNumElements()) {
+    assert(NumDstElts == 4 && "Unexpected vector size");
+    uint32_t ShuffleMask[4] = {0, 1, 2, 3};
+    Src = CGF.Builder.CreateShuffleVector(Src, UndefValue::get(Src->getType()),
+                                          ShuffleMask);
+  }
+
+  // Bitcast from vXi16 to vXf16.
+  llvm::Type *HalfTy = llvm::VectorType::get(
+      llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
+  Src = CGF.Builder.CreateBitCast(Src, HalfTy);
+
+  // Perform the fp-extension.
+  Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
+
+  // Apply the writemask for the masked variants.
+  if (Ops.size() >= 3)
+    Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
+  return Res;
+}
+
 // Convert a BF16 to a float.
 static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
                                         const CallExpr *E,
@@ -12531,6 +12571,14 @@
   case X86::BI__builtin_ia32_cmpordsd:
     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
 
+  // F16C half2float intrinsics
+  case X86::BI__builtin_ia32_vcvtph2ps:
+  case X86::BI__builtin_ia32_vcvtph2ps256:
+  case X86::BI__builtin_ia32_vcvtph2ps_mask:
+  case X86::BI__builtin_ia32_vcvtph2ps256_mask:
+  case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+    return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
+
   // AVX512 bf16 intrinsics
   case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
     Ops[2] = getMaskVecValue(*this, Ops[2],
diff --git a/clang/test/CodeGen/avx512f-builtins-constrained.c b/clang/test/CodeGen/avx512f-builtins-constrained.c
index dcddd24..1ccc234 100644
--- a/clang/test/CodeGen/avx512f-builtins-constrained.c
+++ b/clang/test/CodeGen/avx512f-builtins-constrained.c
@@ -171,21 +171,32 @@
 __m512 test_mm512_cvtph_ps (__m256i __A)
 {
   // COMMON-LABEL: test_mm512_cvtph_ps
-  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.512
+  // COMMONIR: bitcast <4 x i64> %{{.*}} to <16 x i16>
+  // COMMONIR: bitcast <16 x i16> %{{.*}} to <16 x half>
+  // UNCONSTRAINED: fpext <16 x half> %{{.*}} to <16 x float>
+  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half> %{{.*}}, metadata !"fpexcept.strict")
   return _mm512_cvtph_ps (__A);
 }
 
 __m512 test_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
 {
   // COMMON-LABEL: test_mm512_mask_cvtph_ps
-  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.512
+  // COMMONIR: bitcast <4 x i64> %{{.*}} to <16 x i16>
+  // COMMONIR: bitcast <16 x i16> %{{.*}} to <16 x half>
+  // UNCONSTRAINED: fpext <16 x half> %{{.*}} to <16 x float>
+  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half> %{{.*}}, metadata !"fpexcept.strict")
+  // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
   return _mm512_mask_cvtph_ps (__W,__U,__A);
 }
 
 __m512 test_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
 {
   // COMMON-LABEL: test_mm512_maskz_cvtph_ps
-  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.512
+  // COMMONIR: bitcast <4 x i64> %{{.*}} to <16 x i16>
+  // COMMONIR: bitcast <16 x i16> %{{.*}} to <16 x half>
+  // UNCONSTRAINED: fpext <16 x half> %{{.*}} to <16 x float>
+  // CONSTRAINED: call <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half> %{{.*}}, metadata !"fpexcept.strict")
+  // COMMONIR: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
   return _mm512_maskz_cvtph_ps (__U,__A);
 }
 
diff --git a/clang/test/CodeGen/avx512f-builtins.c b/clang/test/CodeGen/avx512f-builtins.c
index 390ea14..c193e7d 100644
--- a/clang/test/CodeGen/avx512f-builtins.c
+++ b/clang/test/CodeGen/avx512f-builtins.c
@@ -9463,21 +9463,29 @@
 __m512 test_mm512_cvtph_ps (__m256i __A)
 {
   // CHECK-LABEL: @test_mm512_cvtph_ps
-  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.512
+  // CHECK: bitcast <4 x i64> %{{.*}} to <16 x i16>
+  // CHECK: bitcast <16 x i16> %{{.*}} to <16 x half>
+  // CHECK: fpext <16 x half> %{{.*}} to <16 x float>
   return _mm512_cvtph_ps (__A);
 }
 
 __m512 test_mm512_mask_cvtph_ps (__m512 __W, __mmask16 __U, __m256i __A)
 {
   // CHECK-LABEL: @test_mm512_mask_cvtph_ps
-  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.512
+  // CHECK: bitcast <4 x i64> %{{.*}} to <16 x i16>
+  // CHECK: bitcast <16 x i16> %{{.*}} to <16 x half>
+  // CHECK: fpext <16 x half> %{{.*}} to <16 x float>
+  // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
   return _mm512_mask_cvtph_ps (__W,__U,__A);
 }
 
 __m512 test_mm512_maskz_cvtph_ps (__mmask16 __U, __m256i __A)
 {
   // CHECK-LABEL: @test_mm512_maskz_cvtph_ps
-  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.512
+  // CHECK: bitcast <4 x i64> %{{.*}} to <16 x i16>
+  // CHECK: bitcast <16 x i16> %{{.*}} to <16 x half>
+  // CHECK: fpext <16 x half> %{{.*}} to <16 x float>
+  // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
   return _mm512_maskz_cvtph_ps (__U,__A);
 }
 
diff --git a/clang/test/CodeGen/avx512vl-builtins-constrained.c b/clang/test/CodeGen/avx512vl-builtins-constrained.c
index 0e2aa4e..bc59510 100644
--- a/clang/test/CodeGen/avx512vl-builtins-constrained.c
+++ b/clang/test/CodeGen/avx512vl-builtins-constrained.c
@@ -8,25 +8,43 @@
 
 __m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
   // COMMON-LABEL: @test_mm_mask_cvtph_ps
-  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.128
+  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // COMMONIR: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // COMMONIR: bitcast <4 x i16> %{{.*}} to <4 x half>
+  // UNCONSTRAINED: fpext <4 x half> %{{.*}} to <4 x float>
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
+  // COMMONIR: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_mask_cvtph_ps(__W, __U, __A);
 }
 
 __m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
   // COMMON-LABEL: @test_mm_maskz_cvtph_ps
-  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.128
+  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // COMMONIR: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // COMMONIR: bitcast <4 x i16> %{{.*}} to <4 x half>
+  // UNCONSTRAINED: fpext <4 x half> %{{.*}} to <4 x float>
+  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
+  // COMMONIR: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_maskz_cvtph_ps(__U, __A);
 }
 
 __m256 test_mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A) {
   // COMMON-LABEL: @test_mm256_mask_cvtph_ps
-  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.256
+  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // COMMONIR: bitcast <8 x i16> %{{.*}} to <8 x half>
+  // UNCONSTRAINED: fpext <8 x half> %{{.*}} to <8 x float>
+  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half> %{{.*}}, metadata !"fpexcept.strict")
+  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_mask_cvtph_ps(__W, __U, __A);
 }
 
 __m256 test_mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
   // COMMON-LABEL: @test_mm256_maskz_cvtph_ps
-  // COMMONIR: @llvm.x86.avx512.mask.vcvtph2ps.256
+  // COMMONIR: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // COMMONIR: bitcast <8 x i16> %{{.*}} to <8 x half>
+  // UNCONSTRAINED: fpext <8 x half> %{{.*}} to <8 x float>
+  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half> %{{.*}}, metadata !"fpexcept.strict")
+  // COMMONIR: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_maskz_cvtph_ps(__U, __A);
 }
 
diff --git a/clang/test/CodeGen/avx512vl-builtins.c b/clang/test/CodeGen/avx512vl-builtins.c
index 5bed16e..8d8fded 100644
--- a/clang/test/CodeGen/avx512vl-builtins.c
+++ b/clang/test/CodeGen/avx512vl-builtins.c
@@ -9692,25 +9692,39 @@
 
 __m128 test_mm_mask_cvtph_ps(__m128 __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_mask_cvtph_ps
-  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.128
+  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK: bitcast <4 x i16> %{{.*}} to <4 x half>
+  // CHECK: fpext <4 x half> %{{.*}} to <4 x float>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_mask_cvtph_ps(__W, __U, __A);
 }
 
 __m128 test_mm_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_maskz_cvtph_ps
-  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.128
+  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK: bitcast <4 x i16> %{{.*}} to <4 x half>
+  // CHECK: fpext <4 x half> %{{.*}} to <4 x float>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_maskz_cvtph_ps(__U, __A);
 }
 
 __m256 test_mm256_mask_cvtph_ps(__m256 __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm256_mask_cvtph_ps
-  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.256
+  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // CHECK: bitcast <8 x i16> %{{.*}} to <8 x half>
+  // CHECK: fpext <8 x half> %{{.*}} to <8 x float>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_mask_cvtph_ps(__W, __U, __A);
 }
 
 __m256 test_mm256_maskz_cvtph_ps(__mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm256_maskz_cvtph_ps
-  // CHECK: @llvm.x86.avx512.mask.vcvtph2ps.256
+  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // CHECK: bitcast <8 x i16> %{{.*}} to <8 x half>
+  // CHECK: fpext <8 x half> %{{.*}} to <8 x float>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_maskz_cvtph_ps(__U, __A);
 }
 
diff --git a/clang/test/CodeGen/f16c-builtins-constrained.c b/clang/test/CodeGen/f16c-builtins-constrained.c
index 74cf3d1..ce84155 100644
--- a/clang/test/CodeGen/f16c-builtins-constrained.c
+++ b/clang/test/CodeGen/f16c-builtins-constrained.c
@@ -13,7 +13,9 @@
   // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 5
   // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 6
   // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 7
-  // CHECK: call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %{{.*}})
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK: bitcast <4 x i16> %{{.*}} to <4 x half>
+  // CHECK: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
   // CHECK: extractelement <4 x float> %{{.*}}, i32 0
   return _cvtsh_ss(a);
 }
@@ -34,13 +36,18 @@
 
 __m128 test_mm_cvtph_ps(__m128i a) {
   // CHECK-LABEL: test_mm_cvtph_ps
-  // CHECK: call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %{{.*}})
+  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK: bitcast <4 x i16> %{{.*}} to <4 x half>
+  // CHECK: call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %{{.*}}, metadata !"fpexcept.strict")
   return _mm_cvtph_ps(a);
 }
 
 __m256 test_mm256_cvtph_ps(__m128i a) {
   // CHECK-LABEL: test_mm256_cvtph_ps
-  // CHECK: call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %{{.*}})
+  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // CHECK: bitcast <8 x i16> %{{.*}} to <8 x half>
+  // CHECK: call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half> %{{.*}}, metadata !"fpexcept.strict")
   return _mm256_cvtph_ps(a);
 }
 
diff --git a/clang/test/CodeGen/f16c-builtins.c b/clang/test/CodeGen/f16c-builtins.c
index ce14187..1616cfb 100644
--- a/clang/test/CodeGen/f16c-builtins.c
+++ b/clang/test/CodeGen/f16c-builtins.c
@@ -13,7 +13,9 @@
   // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 5
   // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 6
   // CHECK: insertelement <8 x i16> %{{.*}}, i16 0, i32 7
-  // CHECK: call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %{{.*}})
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK: bitcast <4 x i16> %{{.*}} to <4 x half>
+  // CHECK: fpext <4 x half> %{{.*}} to <4 x float>
   // CHECK: extractelement <4 x float> %{{.*}}, i32 0
   return _cvtsh_ss(a);
 }
@@ -31,13 +33,18 @@
 
 __m128 test_mm_cvtph_ps(__m128i a) {
   // CHECK-LABEL: test_mm_cvtph_ps
-  // CHECK: call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %{{.*}})
+  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // CHECK: shufflevector <8 x i16> %{{.*}}, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  // CHECK: bitcast <4 x i16> %{{.*}} to <4 x half>
+  // CHECK: fpext <4 x half> %{{.*}} to <4 x float>
   return _mm_cvtph_ps(a);
 }
 
 __m256 test_mm256_cvtph_ps(__m128i a) {
   // CHECK-LABEL: test_mm256_cvtph_ps
-  // CHECK: call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %{{.*}})
+  // CHECK: bitcast <2 x i64> %{{.*}} to <8 x i16>
+  // CHECK: bitcast <8 x i16> %{{.*}} to <8 x half>
+  // CHECK: fpext <8 x half> %{{.*}} to <8 x float>
   return _mm256_cvtph_ps(a);
 }
 