// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <smmintrin.h>

#include <xnnpack/math-stubs.h>

void xnn_math_f32_f16_cvt__sse41(
    size_t n,
    const float* input,
    void* output)
{
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // Scaling by 2**112 and then by 2**-110 saturates values above the binary16 range to infinity
  // while setting up the biased addition in the loop below that performs the rounding.
  const __m128 vscale_to_inf = _mm_set1_ps(0x1.0p+112f);
  const __m128 vscale_to_zero = _mm_set1_ps(0x1.0p-110f);
  // Mask to clear the binary32 sign bit.
  const __m128 vnonsign_mask = _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF));
  // Binary16 exponent bias (15) positioned in the binary32 exponent field.
  const __m128i vexp_bias = _mm_set1_epi32(0x07800000);
  // Binary32 exponent mask, equal to the bit pattern of +infinity.
  const __m128i vexpw_max = _mm_set1_epi32(0x7F800000);
  // Lower bound for the bias addend (the 16-bit maximum below clamps only the upper half of each lane).
  const __m128i vbias_min = _mm_set1_epi32(0x40008000);
  // Masks used to assemble the binary16 exponent and mantissa from the biased sum.
  const __m128i vexph_mask = _mm_set1_epi32(0x7C00);
  const __m128i vmanth_mask = _mm_set1_epi32(0x0FFF);
  // Canonical binary16 NaN.
  const __m128i vnanh = _mm_set1_epi16(0x7E00);

  uint16_t* o = (uint16_t*) output;
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;
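
    // Split each input into its absolute value and a per-lane sign mask (all ones for negative inputs).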
    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);
    const __m128i vsignw_lo = _mm_srai_epi32(_mm_castps_si128(vx_lo), 31);
    const __m128i vsignw_hi = _mm_srai_epi32(_mm_castps_si128(vx_hi), 31);
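
    // Start the bias addend: add the binary16 exponent bias (in the binary32 exponent position) to the bits of |x|.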
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
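
    // Scale |x| up by 2**112 so that values too large for binary16 overflow towards infinity.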
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
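
    // Only NaN inputs have |x| bits greater than the binary32 infinity pattern.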
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);
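
    // Pack the sign masks to 16 bits and keep only the exponent bits of the bias addend.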
    __m128i vsignh = _mm_packs_epi32(vsignw_lo, vsignw_hi);
    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
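
    // Scale back down; the two scale factors combine to 2**2 for values that stay in range.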
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
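
    // Pack the NaN masks to 16 bits and shift the sign into the binary16 sign bit position.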
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    vsignh = _mm_slli_epi16(vsignh, 15);
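
    // Clamp the bias addend from below so that binary16 subnormal and zero results are produced
    // correctly by the addition that follows.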
    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
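
    // Add the bias, reinterpreted as a float: this lines up the binary16 mantissa in the low bits
    // of the sum, with round-to-nearest-even done by the floating-point adder.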
    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
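
    // Extract the binary16 exponent and mantissa bits from the binary32 representation of the sum.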
    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);
    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);
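
    // Sum the two fields to form the non-sign part of the binary16 result.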
    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);
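
    // Pack down to 16 bits, substitute the canonical NaN for NaN inputs, and reattach the sign.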
    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);
    const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);
    const __m128i vh = _mm_or_si128(vabsh, vsignh);

    _mm_storeu_si128((__m128i*) o, vh);
    o += 8;
  }
}