// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert ELEMENTS_TILE % 8 == 0
$assert ELEMENTS_TILE >= 8
$SIMD_TILE = ELEMENTS_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/raddstoreexpminusmax.h>
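
// The generated kernel computes f[i] = exp(input[i] - *max) for a row of
// `elements` bytes of floats, stores the results to `output`, and writes the
// sum of the exponentials to `sum` (the exp/sum stage of a softmax).
// ELEMENTS_TILE (a multiple of 8) sets the unroll factor of the main loop;
// ACCUMULATORS sets how many partial sums are kept.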
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x${ELEMENTS_TILE}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(elements % sizeof(float) == 0);

  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);
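
  // Initialize the running sums (one per accumulator; multiple partial sums keep more additions in flight).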
  $for K in range(ACCUMULATORS):
    __m256 vacc${K} = _mm256_setzero_ps();
  for (; elements >= ${ELEMENTS_TILE} * sizeof(float); elements -= ${ELEMENTS_TILE} * sizeof(float)) {
    const __m256 vi0 = _mm256_loadu_ps(input);
    $for N in range(1, SIMD_TILE):
      const __m256 vi${N} = _mm256_loadu_ps(input + ${N * 8});
    input += ${ELEMENTS_TILE};
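
    // Subtract the row maximum so the largest argument to exp() is 0.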
    $for N in range(SIMD_TILE):
      const __m256 vx${N} = _mm256_sub_ps(vi${N}, vi_max);
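
    // n := round(x / ln(2)), computed as x * log2(e) + magic_bias so the rounded integer lands in the low mantissa bits.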
    $for N in range(SIMD_TILE):
      __m256 vn${N} = _mm256_fmadd_ps(vx${N}, vlog2e, vmagic_bias);
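
    // s := 2**n, built by shifting the integer bits of vn left by 23 into the float exponent field.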
    $for N in range(SIMD_TILE):
      const __m256 vs${N} = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn${N}), 23));
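
    // Subtract the magic bias back out to recover n as a float.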
    $for N in range(SIMD_TILE):
      vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);
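
    // t := x - n * ln(2), the reduced argument.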
    $for N in range(SIMD_TILE):
      __m256 vt${N} = _mm256_fmadd_ps(vn${N}, vminus_ln2, vx${N});
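
    // Evaluate p(t) = c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))) by Horner's scheme; exp(t) ~= 1 + t * p(t).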
    $for N in range(SIMD_TILE):
      __m256 vp${N} = _mm256_fmadd_ps(vc5, vt${N}, vc4);

    $for N in range(SIMD_TILE):
      vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc3);

    $for N in range(SIMD_TILE):
      vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc2);

    $for N in range(SIMD_TILE):
      vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc1);
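
    // Reconstruct f := exp(x) ~= s * (1 + t * p(t)), evaluated as s + (t * s) * p(t).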
    $for N in range(SIMD_TILE):
      vt${N} = _mm256_mul_ps(vt${N}, vs${N});

    $for N in range(SIMD_TILE):
      __m256 vf${N} = _mm256_fmadd_ps(vt${N}, vp${N}, vs${N});
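
    // Zero the outputs for inputs below the denormal cutoff, where exp(x) would be denormal or zero.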
    $for N in range(SIMD_TILE):
      vf${N} = _mm256_andnot_ps(_mm256_cmp_ps(vx${N}, vdenorm_cutoff, _CMP_LT_OS), vf${N});
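
    // Store the exponentials for this tile.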
    _mm256_storeu_ps(output, vf0);
    $for N in range(1, SIMD_TILE):
      _mm256_storeu_ps(output + ${N * 8}, vf${N});
    output += ${ELEMENTS_TILE};
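
    // Accumulate the exponentials into the partial sums, round-robin across the accumulators.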
    $for N in range(SIMD_TILE):
      vacc${N % ACCUMULATORS} = _mm256_add_ps(vacc${N % ACCUMULATORS}, vf${N});
  }
  $if ACCUMULATORS > 1:
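    // Add the partial sums down into vacc0.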
    $ACC_SLICE = 1
    $while ACC_SLICE < ACCUMULATORS:
      $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
        $if A + ACC_SLICE < ACCUMULATORS:
          vacc${A} = _mm256_add_ps(vacc${A}, vacc${A + ACC_SLICE});
      $ACC_SLICE *= 2

  __m256 vacc = vacc0;
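  // Process the remaining full vectors of 8 floats with the same exp computation.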
  for (; elements >= 8 * sizeof(float); elements -= 8 * sizeof(float)) {
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;

    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    _mm256_storeu_ps(output, vf);
    output += 8;

    vacc = _mm256_add_ps(vacc, vf);
  }
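  // Process the final 1..7 floats: load them with a mask, compute exp as above,
  // then store and accumulate only the valid lanes.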
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - elements));

    const __m256 vi = _mm256_maskload_ps(input, vmask);

    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);
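
    // Write out the 1..7 results with 4-, 2-, and 1-float partial stores.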
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (elements & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (elements & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (elements & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }

    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
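  // Reduce the 8 lanes of vacc to a single scalar sum and store it.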
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);

  _mm256_zeroupper();
}