// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


void xnn_f32_velu_ukernel__wasmsimd_${"x86" if X86 else "arm"}_rr2_p6_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

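  // Broadcast the ELU parameters (prescale, alpha, beta) to all four lanes.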
  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);

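  // Constants for the exp(z) - 1 approximation ("rr2_p6" in the kernel name: 2-step range
  // reduction, degree-6 polynomial). Inputs below vsat_cutoff saturate to -1; vmagic_bias
  // implements round-to-nearest via addition; ln(2) is split into high and low parts for
  // extra precision; vc6...vc2 are the polynomial coefficients.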
  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
  const v128_t vone = wasm_f32x4_splat(1.0f);

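  // Main loop: ${BATCH_TILE} elements per iteration (this loop is only generated when BATCH_TILE > 4).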
  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

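      // z := prescale * x. The ARM build clamps z to the saturation cutoff up front; the x86 build
      // avoids wasm_f32x4_max (slow on x86 due to its NaN-propagation semantics) and instead masks
      // out saturated lanes further below.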
      $for N in range(0, BATCH_TILE, 4):
        $if X86:
          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale);
        $else:
          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_max(wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale), vsat_cutoff);

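      // n := round(z / ln(2)), computed by scaling with log2(e) and adding the magic bias so the
      // rounded integer lands in the low mantissa bits.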
      $for N in range(0, BATCH_TILE, 4):
        v128_t vn${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);

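      // s := 2**n, built by shifting n into the floating-point exponent field.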
      $for N in range(0, BATCH_TILE, 4):
        v128_t vs${ABC[N:N+4]} = wasm_i32x4_shl(vn${ABC[N:N+4]}, 23);

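      // Subtract the magic bias to recover n as a regular float.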
      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = wasm_f32x4_sub(vn${ABC[N:N+4]}, vmagic_bias);

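      // t := z - n * ln(2), in two steps (ln2_hi + ln2_lo) to reduce rounding error. On x86, lanes
      // with z <= vsat_cutoff are recorded in vsatm and zeroed in both s and t, forcing
      // exp(z) - 1 == -1 in those lanes.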
      $for N in range(0, BATCH_TILE, 4):
        v128_t vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_hi), vz${ABC[N:N+4]});
        $if X86:
          const v128_t vsatm${ABC[N:N+4]} = wasm_f32x4_le(vz${ABC[N:N+4]}, vsat_cutoff);

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_lo), vt${ABC[N:N+4]});
        $if X86:
          vs${ABC[N:N+4]} = wasm_v128_andnot(vs${ABC[N:N+4]}, vsatm${ABC[N:N+4]});

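      // (x86: apply the saturation mask to t first.) Then Horner evaluation of
      // p := t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))), so that e**t ~ 1 + t + t * p
      // on the reduced range.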
      $for N in range(0, BATCH_TILE, 4):
        $if X86:
          vt${ABC[N:N+4]} = wasm_v128_andnot(vt${ABC[N:N+4]}, vsatm${ABC[N:N+4]});
        v128_t vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt${ABC[N:N+4]}), vc5);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc4);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc3);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc2);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

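      // Reconstruct e**z - 1 = (s - 1) + s * t * (1 + p).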
      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_mul(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
        vs${ABC[N:N+4]} = wasm_f32x4_sub(vs${ABC[N:N+4]}, vone);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vt${ABC[N:N+4]});

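      // Negative branch of ELU: alpha * (e**z - 1).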
      $for N in range(0, BATCH_TILE, 4):
        const v128_t ve${ABC[N:N+4]} = wasm_f32x4_mul(wasm_f32x4_add(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);

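      // Positive branch is beta * x; vsignm is all-ones in lanes where x is negative and selects
      // between the two branches per lane.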
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vsignm${ABC[N:N+4]} = wasm_i32x4_shr(vx${ABC[N:N+4]}, 31);
        vx${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vbeta);

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vy${ABC[N:N+4]} = wasm_v128_bitselect(ve${ABC[N:N+4]}, vx${ABC[N:N+4]}, vsignm${ABC[N:N+4]});

      wasm_v128_store(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
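  // Process remaining blocks of 4 elements with the same computation on a single vector.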
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(x);
    x += 4;

    $if X86:
      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
    $else:
      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);

    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
    $if X86:
      const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    $if X86:
      vs = wasm_v128_andnot(vs, vsatm);
      vt = wasm_v128_andnot(vt, vsatm);

    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);

    vt = wasm_f32x4_mul(vt, vs);
    vs = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);

    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
    vx = wasm_f32x4_mul(vx, vbeta);
    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);

    wasm_v128_store(y, vy);
    y += 4;
  }
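  // Tail of 1-3 elements: compute a full vector, then store only the valid lanes.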
  if XNN_UNLIKELY(n != 0) {
    v128_t vx = wasm_v128_load(x);

    $if X86:
      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
    $else:
      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);

    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
    $if X86:
      const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    $if X86:
      vs = wasm_v128_andnot(vs, vsatm);
      vt = wasm_v128_andnot(vt, vsatm);

    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);

    vt = wasm_f32x4_mul(vt, vs);
    vs = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);

    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
    vx = wasm_f32x4_mul(vx, vbeta);
    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);

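    // Store the low two lanes with a single 8-byte write, then shift lanes 2-3 down so a possible
    // third element is in lane 0.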
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}