// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

void xnn_f32_sigmoid_ukernel__wasmsimd_p5_div_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const void* params) XNN_DISABLE_TSAN
{
  assert(n % sizeof(float) == 0);
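
  // Overall structure of the algorithm:
  //   z := |x|
  //   f := exp(-z) / (1 + exp(-z)) = sigmoid(-z)
  //   sigmoid(x) = f      if x < 0
  //   sigmoid(x) = 1 - f  if x >= 0
  // exp(-z) is evaluated via the range reduction exp(-z) = 2**n * exp(-t),
  // |t| <= log(2)/2, with a degree-5 polynomial for exp(-t) (the "p5" in the
  // kernel name) and a final division (the "div" in the kernel name).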
  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
  const v128_t vminus_log2e = wasm_f32x4_splat(-0x1.715476p+0f);
  const v128_t vln2_hi = wasm_f32x4_splat(0x1.62E400p-1f);
  const v128_t vln2_lo = wasm_f32x4_splat(0x1.7F7D1Cp-20f);
  const v128_t vc5 = wasm_f32x4_splat(-0x1.0F9F9Cp-7f);
  const v128_t vc4 = wasm_f32x4_splat( 0x1.573A1Ap-5f);
  const v128_t vc3 = wasm_f32x4_splat(-0x1.555A80p-3f);
  const v128_t vc2 = wasm_f32x4_splat( 0x1.FFFDC6p-2f);
  const v128_t vc1 = wasm_f32x4_splat(-0x1.FFFFF6p-1f);
  const v128_t vone = wasm_f32x4_splat(1.0f);
  const v128_t vdenorm_cutoff = wasm_f32x4_splat(0x1.5D589Ep+6f);

  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};
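
      // z := |x|; sigmoid(x) is reconstructed from f = sigmoid(-z) at the end.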
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vz${ABC[N:N+4]} = wasm_f32x4_abs(vx${ABC[N:N+4]});
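
      // n := round(-z / log(2)). Multiplying by -log2(e) and adding the magic
      // bias pushes the fractional bits out, leaving round(-z / log(2)) in the
      // low bits of the sum.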
      $for N in range(0, BATCH_TILE, 4):
        v128_t vn${ABC[N:N+4]} = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz${ABC[N:N+4]}, vminus_log2e));
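
      // s := 2**n. Shifting the magic-biased sum left by 23 moves n (plus the
      // exponent bias) into the floating-point exponent field.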
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vs${ABC[N:N+4]} = wasm_i32x4_shl(vn${ABC[N:N+4]}, 23);
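
      // Subtract the magic bias to recover n as a float.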
      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = wasm_f32x4_sub(vn${ABC[N:N+4]}, vmagic_bias);
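
      // t := z + n * log(2), with log(2) split into high and low parts
      // (Cody-Waite) for extra accuracy. Then exp(-z) = s * exp(-t).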
      $for N in range(0, BATCH_TILE, 4):
        v128_t vt${ABC[N:N+4]} = wasm_f32x4_add(vz${ABC[N:N+4]}, wasm_f32x4_mul(vn${ABC[N:N+4]}, vln2_hi));

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_add(vt${ABC[N:N+4]}, wasm_f32x4_mul(vn${ABC[N:N+4]}, vln2_lo));
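
      // Evaluate the degree-5 polynomial p(t) ~ (exp(-t) - 1) / t on
      // |t| <= log(2)/2 using Horner's scheme.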
      $for N in range(0, BATCH_TILE, 4):
        v128_t vp${ABC[N:N+4]} = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt${ABC[N:N+4]}, vc5));

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}));
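
      // Reconstruct exp(-z) = s * (1 + t * p(t)) = s + (t * s) * p(t).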
      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_mul(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const v128_t ve${ABC[N:N+4]} = wasm_f32x4_add(vs${ABC[N:N+4]}, wasm_f32x4_mul(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}));
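
      // Reconstruct sigmoid(-z) = exp(-z) / (1 + exp(-z)).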
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vd${ABC[N:N+4]} = wasm_f32x4_add(ve${ABC[N:N+4]}, vone);

      $for N in range(0, BATCH_TILE, 4):
        v128_t vf${ABC[N:N+4]} = wasm_f32x4_div(ve${ABC[N:N+4]}, vd${ABC[N:N+4]});
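
      // Flush the result to 0 when z is above the cutoff (~87.34), where
      // exp(-z) underflows; the sign fixup below then yields exactly 0 or 1.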
      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = wasm_v128_andnot(vf${ABC[N:N+4]}, wasm_f32x4_gt(vz${ABC[N:N+4]}, vdenorm_cutoff));
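
      // sigmoid(x) = f if x < 0, or 1 - f if x >= 0. The arithmetic shift of x
      // by 31 yields an all-ones mask for negative x to drive the bit-select.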
      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = wasm_v128_bitselect(vf${ABC[N:N+4]}, wasm_f32x4_sub(vone, vf${ABC[N:N+4]}), wasm_i32x4_shr(vx${ABC[N:N+4]}, 31));

      wasm_v128_store(y, vf${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vf${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
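
  // Vectorized loop: 4 elements per iteration, same computation as described
  // above.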
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));

    const v128_t vd = wasm_f32x4_add(ve, vone);
    v128_t vf = wasm_f32x4_div(ve, vd);

    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    wasm_v128_store(y, vf);
    y += 4;
  }
  if XNN_UNLIKELY(n != 0) {
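    // Remainder of 1-3 elements: compute a full vector and store only the
    // valid lanes below.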
    const v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));

    const v128_t vd = wasm_f32x4_add(ve, vone);
    v128_t vf = wasm_f32x4_div(ve, vd);

    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));
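
    // Write two lanes as a double if n covers 2 floats, then shift and write
    // one more lane if n covers a remaining float.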
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vf, 0);
      vf = wasm_v32x4_shuffle(vf, vf, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}