// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$assert RR_STEPS in [1, 2]
$assert DIV_ALGO in ["div", "nr1fma", "nr1fma1adj"]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SIMD_TILE = BATCH_TILE // 16
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_sigmoid_ukernel__avx512f_rr${RR_STEPS}_lut32_p2_perm2_scalef_${DIV_ALGO}_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const void* params)
{
  assert(n % sizeof(float) == 0);

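  // sigmoid(x) is computed as e / (e + 1) with e = exp(z) and z = -|x|, and the
  // result is reflected for non-negative inputs: sigmoid(x) = 1 - sigmoid(-x).
  // exp(z) is evaluated from a 32-entry table of exp2(k / 32) values combined with a
  // degree-2 polynomial, and the final quotient uses either an exact division or a
  // VRCP14PS reciprocal refined by Newton-Raphson, depending on DIV_ALGO.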
  const __m512i vsign_mask = _mm512_set1_epi32(0x80000000);
  const __m512 vmagic_bias = _mm512_set1_ps(0x1.800000p18f);
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  $if RR_STEPS == 1:
    const __m512 vminus_ln2 = _mm512_set1_ps(-0x1.62E43p-1f);
  $else:
    const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E43p-1f);
    const __m512 vminus_ln2_lo = _mm512_set1_ps(0x1.05C61p-29f);

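  // Table of exp2(k / 32) values, k = 0..31: vtable_lo holds entries 0-15 and
  // vtable_hi holds entries 16-31, so _mm512_permutex2var_ps can pick any entry
  // with a 5-bit index.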
  const __m512 vtable_hi = _mm512_set_ps(
    0x1.F50766p+0f, 0x1.EA4AFAp+0f, 0x1.DFC974p+0f, 0x1.D5818Ep+0f,
    0x1.CB720Ep+0f, 0x1.C199BEp+0f, 0x1.B7F770p+0f, 0x1.AE89FAp+0f,
    0x1.A5503Cp+0f, 0x1.9C4918p+0f, 0x1.93737Cp+0f, 0x1.8ACE54p+0f,
    0x1.82589Ap+0f, 0x1.7A1148p+0f, 0x1.71F75Ep+0f, 0x1.6A09E6p+0f);
  const __m512 vtable_lo = _mm512_set_ps(
    0x1.6247ECp+0f, 0x1.5AB07Ep+0f, 0x1.5342B6p+0f, 0x1.4BFDAEp+0f,
    0x1.44E086p+0f, 0x1.3DEA64p+0f, 0x1.371A74p+0f, 0x1.306FE0p+0f,
    0x1.29E9E0p+0f, 0x1.2387A6p+0f, 0x1.1D4874p+0f, 0x1.172B84p+0f,
    0x1.11301Ep+0f, 0x1.0B5586p+0f, 0x1.059B0Ep+0f, 0x1.000000p+0f);

  const __m512 vc2 = _mm512_set1_ps(0x1.000000p-1f);
  const __m512 vc1 = _mm512_set1_ps(0x1.0000F6p-0f);
  const __m512 vone = _mm512_set1_ps(1.0f);

  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const __m512 vx${ABC[0]} = _mm512_loadu_ps(x);
      $for N in range(1, SIMD_TILE):
        const __m512 vx${ABC[N]} = _mm512_loadu_ps(x + ${N * 16});
      x += ${BATCH_TILE};

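      // z = -|x|: force the sign bit so that exp(z) stays in (0, 1] and cannot overflow.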
      $for N in range(SIMD_TILE):
        const __m512 vz${ABC[N]} = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx${ABC[N]}), vsign_mask));

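      // n := round(z * log2(e) * 32) / 32 via the magic bias: after the FMA the low
      // 5 mantissa bits of vn index the exp2 table; subtracting the bias recovers n.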
      $for N in range(SIMD_TILE):
        __m512 vn${ABC[N]} = _mm512_fmadd_ps(vz${ABC[N]}, vlog2e, vmagic_bias);

      $for N in range(SIMD_TILE):
        const __m512 vl${ABC[N]} = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn${ABC[N]}), vtable_hi);

      $for N in range(SIMD_TILE):
        vn${ABC[N]} = _mm512_sub_ps(vn${ABC[N]}, vmagic_bias);

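      // Reduced argument t := z - n * ln(2); with RR_STEPS == 2 the subtraction uses a
      // split hi/lo representation of ln(2) for extra accuracy.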
      $if RR_STEPS == 1:
        $for N in range(SIMD_TILE):
          __m512 vt${ABC[N]} = _mm512_fmadd_ps(vn${ABC[N]}, vminus_ln2, vz${ABC[N]});
      $else:
        $for N in range(SIMD_TILE):
          __m512 vt${ABC[N]} = _mm512_fmadd_ps(vn${ABC[N]}, vminus_ln2_hi, vz${ABC[N]});

        $for N in range(SIMD_TILE):
          vt${ABC[N]} = _mm512_fmadd_ps(vn${ABC[N]}, vminus_ln2_lo, vt${ABC[N]});

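      // Polynomial approximation: p := l * (1 + t * (c1 + t * c2)) =~ l * exp(t).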
      $for N in range(SIMD_TILE):
        __m512 vp${ABC[N]} = _mm512_fmadd_ps(vt${ABC[N]}, vc2, vc1);

      $for N in range(SIMD_TILE):
        vt${ABC[N]} = _mm512_mul_ps(vt${ABC[N]}, vl${ABC[N]});

      $for N in range(SIMD_TILE):
        vp${ABC[N]} = _mm512_fmadd_ps(vt${ABC[N]}, vp${ABC[N]}, vl${ABC[N]});

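      // Reconstruct e := exp(z) = p * 2**floor(n) with VSCALEFPS, then form d := e + 1.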
      $for N in range(SIMD_TILE):
        const __m512 ve${ABC[N]} = _mm512_scalef_ps(vp${ABC[N]}, vn${ABC[N]});

      $for N in range(SIMD_TILE):
        const __m512 vd${ABC[N]} = _mm512_add_ps(ve${ABC[N]}, vone);

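      // f := e / d, either as an exact division or as a VRCP14PS reciprocal refined by
      // one Newton-Raphson step (with an extra adjustment step for nr1fma1adj).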
      $if DIV_ALGO == "div":
        $for N in range(SIMD_TILE):
          __m512 vf${ABC[N]} = _mm512_div_ps(ve${ABC[N]}, vd${ABC[N]});
      $else:
        $for N in range(SIMD_TILE):
          __m512 vr${ABC[N]} = _mm512_rcp14_ps(vd${ABC[N]});

        $for N in range(SIMD_TILE):
          vr${ABC[N]} = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr${ABC[N]}, vd${ABC[N]}, vone), vr${ABC[N]}, vr${ABC[N]});

        $for N in range(SIMD_TILE):
          __m512 vf${ABC[N]} = _mm512_mul_ps(ve${ABC[N]}, vr${ABC[N]});

        $if DIV_ALGO == "nr1fma1adj":
          $for N in range(SIMD_TILE):
            vf${ABC[N]} = _mm512_fmadd_ps(_mm512_fnmadd_ps(vf${ABC[N]}, vd${ABC[N]}, ve${ABC[N]}), vr${ABC[N]}, vf${ABC[N]});

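      // Reflect lanes where the sign bit of x is clear: sigmoid(x) = 1 - sigmoid(-x).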
      $for N in range(SIMD_TILE):
        vf${ABC[N]} = _mm512_mask_sub_ps(vf${ABC[N]}, _mm512_testn_epi32_mask(_mm512_castps_si512(vx${ABC[N]}), vsign_mask), vone, vf${ABC[N]});

      _mm512_storeu_ps(y, vf${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm512_storeu_ps(y + ${N * 16}, vf${ABC[N]});
      y += ${BATCH_TILE};
    }
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(x);
    x += 16;

    const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));

    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
    const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
    vn = _mm512_sub_ps(vn, vmagic_bias);

    $if RR_STEPS == 1:
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
    $else:
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
      vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    __m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
    vt = _mm512_mul_ps(vt, vl);
    vp = _mm512_fmadd_ps(vt, vp, vl);

    const __m512 ve = _mm512_scalef_ps(vp, vn);
    const __m512 vd = _mm512_add_ps(ve, vone);

    $if DIV_ALGO == "div":
      __m512 vf = _mm512_div_ps(ve, vd);
    $else:
      __m512 vr = _mm512_rcp14_ps(vd);
      vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);

      __m512 vf = _mm512_mul_ps(ve, vr);
      $if DIV_ALGO == "nr1fma1adj":
        vf = _mm512_fmadd_ps(_mm512_fnmadd_ps(vf, vd, ve), vr, vf);

    vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);

    _mm512_storeu_ps(y, vf);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));

    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    const __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
    const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));

    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
    const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
    vn = _mm512_sub_ps(vn, vmagic_bias);

    $if RR_STEPS == 1:
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);
    $else:
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
      vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    __m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
    vt = _mm512_mul_ps(vt, vl);
    vp = _mm512_fmadd_ps(vt, vp, vl);

    const __m512 ve = _mm512_scalef_ps(vp, vn);
    const __m512 vd = _mm512_add_ps(ve, vone);

    $if DIV_ALGO == "div":
      __m512 vf = _mm512_div_ps(ve, vd);
    $else:
      __m512 vr = _mm512_rcp14_ps(vd);
      vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);

      __m512 vf = _mm512_mul_ps(ve, vr);
      $if DIV_ALGO == "nr1fma1adj":
        vf = _mm512_fmadd_ps(_mm512_fnmadd_ps(vf, vd, ve), vr, vf);

    vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);

    _mm512_mask_storeu_ps(y, vmask, vf);
  }
}