// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


void xnn_f32_velu_ukernel__wasmsimd_${"x86" if X86 else "arm"}_rr2_p6_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);

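  // Constants for the exp(z) - 1 approximation: saturation cutoff, magic bias used for rounding,
  // log2(e), -ln(2) split into high and low parts, and degree-6 polynomial coefficients.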
  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
  const v128_t vone = wasm_f32x4_splat(1.0f);

  $if BATCH_TILE > 4:
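    // Main loop: process ${BATCH_TILE} elements per iteration.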
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

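      // z := x * prescale. The ARM variant clamps z at the saturation cutoff up front;
      // the x86 variant handles saturation later by zeroing s for saturated lanes.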
      $for N in range(0, BATCH_TILE, 4):
        $if X86:
          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale);
        $else:
          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_max(wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale), vsat_cutoff);

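      // n := round(z / ln(2)), computed with the magic-bias trick: the rounded integer
      // lands in the low bits of vn.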
      $for N in range(0, BATCH_TILE, 4):
        v128_t vn${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);

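      // s := 2**n, reconstructed by shifting the low bits of vn into the floating-point exponent field.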
      $for N in range(0, BATCH_TILE, 4):
        v128_t vs${ABC[N:N+4]} = wasm_i32x4_shl(vn${ABC[N:N+4]}, 23);

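      // Subtract the magic bias to recover n as a float.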
      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = wasm_f32x4_sub(vn${ABC[N:N+4]}, vmagic_bias);

      $if X86:
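        // Zero s for lanes where z saturates (z <= sat_cutoff), so exp(z) - 1 evaluates
        // to exactly -1 for those lanes.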
        $for N in range(0, BATCH_TILE, 4):
          vs${ABC[N:N+4]} = wasm_v128_andnot(vs${ABC[N:N+4]}, wasm_f32x4_le(vz${ABC[N:N+4]}, vsat_cutoff));

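      // t := z - n * ln(2), with ln(2) split into high and low parts for extra precision.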
      $for N in range(0, BATCH_TILE, 4):
        v128_t vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_hi), vz${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_lo), vt${ABC[N:N+4]});

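      // p := c2*t + c3*t^2 + c4*t^3 + c5*t^4 + c6*t^5, evaluated by Horner's scheme
      // (the non-linear part of the degree-6 polynomial approximation of exp(t) - 1).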
      $for N in range(0, BATCH_TILE, 4):
        v128_t vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt${ABC[N:N+4]}), vc5);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc4);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc3);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vc2);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

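      // Reconstruct exp(z) - 1 = (s - 1) + s * (t + t * p): fold s into t, and keep s - 1
      // for the final sum.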
      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_mul(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
        vs${ABC[N:N+4]} = wasm_f32x4_sub(vs${ABC[N:N+4]}, vone);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vt${ABC[N:N+4]});

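      // e := alpha * (exp(z) - 1), the output for negative inputs.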
      $for N in range(0, BATCH_TILE, 4):
        const v128_t ve${ABC[N:N+4]} = wasm_f32x4_mul(wasm_f32x4_add(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);

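      // Build a per-lane sign mask from x (arithmetic shift right by 31) and compute beta * x
      // for the non-negative branch.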
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vm${ABC[N:N+4]} = wasm_i32x4_shr(vx${ABC[N:N+4]}, 31);
        vx${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vbeta);

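      // Select e for lanes where x < 0 and beta * x otherwise.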
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vy${ABC[N:N+4]} = wasm_v128_bitselect(ve${ABC[N:N+4]}, vx${ABC[N:N+4]}, vm${ABC[N:N+4]});

      wasm_v128_store(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
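  // Process full groups of 4 elements with the same computation.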
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(x);
    x += 4;

    $if X86:
      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
    $else:
      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);

    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    $if X86:
      vs = wasm_v128_andnot(vs, wasm_f32x4_le(vz, vsat_cutoff));

    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);

    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);

    vt = wasm_f32x4_mul(vt, vs);
    vs = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);

    const v128_t vm = wasm_i32x4_shr(vx, 31);
    vx = wasm_f32x4_mul(vx, vbeta);
    const v128_t vy = wasm_v128_bitselect(ve, vx, vm);

    wasm_v128_store(y, vy);
    y += 4;
  }
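  // Handle the last 1-3 elements: compute a full vector, then store only the valid lanes.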
  if XNN_UNLIKELY(n != 0) {
    v128_t vx = wasm_v128_load(x);

    $if X86:
      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
    $else:
      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);

    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);
    $if X86:
      vs = wasm_v128_andnot(vs, wasm_f32x4_le(vz, vsat_cutoff));

    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);

    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);

    vt = wasm_f32x4_mul(vt, vs);
    vs = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);

    const v128_t vm = wasm_i32x4_shr(vx, 31);
    vx = wasm_f32x4_mul(vx, vbeta);
    v128_t vy = wasm_v128_bitselect(ve, vx, vm);

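    // Store 2 lanes and then 1 lane, depending on the remaining element count.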
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}