GEMM/IGEMM implementations in WAsm SIMD intrinsics
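
The "splat" micro-kernels broadcast each lane of a 4-float vector of A and
accumulate it against an NR-wide row of packed B. A minimal sketch of one
inner multiply-add step (variable names illustrative, not part of the
generated kernels):

    v128_t va = wasm_v128_load(a);                          // 4 floats of A
    v128_t vac0 = wasm_v32x4_shuffle(va, va, 0, 0, 0, 0);   // splat lane 0
    v128_t vb = wasm_v128_load(w);                          // 4 floats of packed B
    vacc = wasm_f32x4_add(vacc, wasm_f32x4_mul(vac0, vb));  // accumulate
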
PiperOrigin-RevId: 316620752
diff --git a/src/f32-igemm/wasmsimd-splat.c.in b/src/f32-igemm/wasmsimd-splat.c.in
new file mode 100644
index 0000000..a4edeaf
--- /dev/null
+++ b/src/f32-igemm/wasmsimd-splat.c.in
@@ -0,0 +1,194 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$assert NR % 4 == 0
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+#include <assert.h>
+
+#include <wasm_simd128.h>
+
+#include <xnnpack/igemm.h>
+
+
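+// Template for MRxNR f32 IGEMM (indirect GEMM) minmax micro-kernels in WAsm
+// SIMD intrinsics. The packed weights w hold NR bias values followed by NR
+// weights per k-step, in the order the loads below consume them.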
+void xnn_f32_igemm_minmax_ukernel_${MR}x${NR}__wasmsimd_splat_${"x86" if X86 else "arm"}(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
+{
+ assert(mr != 0);
+ assert(mr <= ${MR});
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (${MR} * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
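+  // Set up MR output row pointers; when mr < MR, excess pointers alias the
+  // previous row, so their stores are harmless duplicates.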
+ float* c0 = c;
+ $for M in range(1, MR):
+ float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
+ $if M % 2 == 0:
+ if XNN_UNPREDICTABLE(mr <= ${M}) {
+ c${M} = c${M-1};
+ }
+ $elif M + 1 == MR:
+ if XNN_UNPREDICTABLE(mr != ${M+1}) {
+ c${M} = c${M-1};
+ }
+ $else:
+ if XNN_UNPREDICTABLE(mr < ${M+1}) {
+ c${M} = c${M-1};
+ }
+
+ do {
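+    // Initialize all MR rows of the accumulator tile with the NR bias
+    // values that lead each block of packed weights.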
+ v128_t vacc0x${ABC[0:4]} = wasm_v128_load(w);
+ $for N in range(4, NR, 4):
+ v128_t vacc0x${ABC[N:N+4]} = wasm_v128_load(w + ${N});
+ $for M in range(1, MR):
+ $for N in range(0, NR, 4):
+ v128_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};
+ w += ${NR};
+
+ size_t p = ks;
+ do {
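+      // Fetch MR row pointers from the indirection buffer; the sentinel
+      // `zero` pointer is deliberately not offset, so padding rows read zeros.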
+ $for M in range(MR):
+ const float* restrict a${M} = a[${M}];
+ assert(a${M} != NULL);
+ if XNN_UNPREDICTABLE(a${M} != zero) {
+ a${M} = (const float*) ((uintptr_t) a${M} + a_offset);
+ }
+ a += ${MR};
+
+ size_t k = kc;
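+      // Main loop: load 4 k-elements per row of A, splat each lane, and
+      // accumulate it against the matching NR-wide row of packed B.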
+ while (k >= 4 * sizeof(float)) {
+ $for M in range(MR):
+ const v128_t va${M} = wasm_v128_load(a${M});
+ a${M} += 4;
+
+ $for L in range(4):
+ $for M in range(MR):
+ const v128_t va${M}c${L} = wasm_v32x4_shuffle(va${M}, va${M}, ${L}, ${L}, ${L}, ${L});
+
+ $for N in range(0, NR, 4):
+ const v128_t vb${ABC[N:N+4]}c${L} = wasm_v128_load(w + ${L * NR + N});
+
+ $for N in range(0, NR, 4):
+ $for M in range(MR):
+ vacc${M}x${ABC[N:N+4]} = wasm_f32x4_add(vacc${M}x${ABC[N:N+4]}, wasm_f32x4_mul(va${M}c${L}, vb${ABC[N:N+4]}c${L}));
+
+ w += ${4 * NR};
+ k -= 4 * sizeof(float);
+ }
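+      // Remainder loop: process the kc % 4 leftover k-elements one at a
+      // time, splatting a single element of A across all lanes.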
+ if XNN_UNLIKELY(k != 0) {
+ do {
+ const v128_t vb${ABC[0:4]} = wasm_v128_load(w);
+ $for N in range(4, NR, 4):
+ const v128_t vb${ABC[N:N+4]} = wasm_v128_load(w + ${N});
+ w += ${NR};
+
+ $for M in range(MR):
+ const v128_t va${M} = wasm_v32x4_load_splat(a${M});
+ a${M} += 1;
+
+ $for M in range(MR):
+ $for N in range(0, NR, 4):
+ vacc${M}x${ABC[N:N+4]} = wasm_f32x4_add(vacc${M}x${ABC[N:N+4]}, wasm_f32x4_mul(va${M}, vb${ABC[N:N+4]}));
+ k -= sizeof(float);
+ } while (k != 0);
+ }
+ p -= ${MR} * sizeof(void*);
+ } while (p != 0);
+
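+    // Clamp the accumulators to [min, max]. The x86 variant uses compare +
+    // bitselect, which reportedly lowered to faster code than
+    // wasm_f32x4_min/max in x86 WAsm engines at the time this was written.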
+    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
+ $for N in range(0, NR, 4):
+ $for M in range(MR):
+ $if X86:
+ vacc${M}x${ABC[N:N+4]} = wasm_v128_bitselect(vacc${M}x${ABC[N:N+4]}, vmax, wasm_f32x4_le(vacc${M}x${ABC[N:N+4]}, vmax));
+ $else:
+ vacc${M}x${ABC[N:N+4]} = wasm_f32x4_min(vacc${M}x${ABC[N:N+4]}, vmax);
+
+    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
+ $for N in range(0, NR, 4):
+ $for M in range(MR):
+ $if X86:
+ vacc${M}x${ABC[N:N+4]} = wasm_v128_bitselect(vmin, vacc${M}x${ABC[N:N+4]}, wasm_f32x4_lt(vacc${M}x${ABC[N:N+4]}, vmin));
+ $else:
+ vacc${M}x${ABC[N:N+4]} = wasm_f32x4_max(vacc${M}x${ABC[N:N+4]}, vmin);
+
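+  // Full tile: at least NR columns remain, so store all NR outputs per row
+  // and rewind the indirection pointer a for the next column block.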
+ if XNN_LIKELY(nc >= ${NR}) {
+ $for M in reversed(range(MR)):
+ wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
+ $for N in range(4, NR, 4):
+ wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
+ c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
+
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= ${NR};
+ } else {
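+      // Partial tile: store the remaining nc (< NR) columns by binary
+      // decomposition: 4-wide stores, then a 2-lane pair, then one lane.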
+ $for LOG2N in reversed(range(NR.bit_length())):
+ $if NR != 1 << LOG2N:
+ if (nc & ${1 << LOG2N}) {
+ $if LOG2N >= 2:
+ $for M in reversed(range(MR)):
+ wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
+ $for N in range(4, 1 << LOG2N, 4):
+ wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
+
+ $for M in reversed(range(MR)):
+ $for N in range(0, 1 << (LOG2N - 1), 4):
+ vacc${M}x${ABC[N:N+4]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+4]};
+
+ $for M in reversed(range(MR)):
+ c${M} += ${1 << LOG2N};
+ $elif LOG2N == 1:
+ $for M in reversed(range(MR)):
+ *((double*) c${M}) = wasm_f64x2_extract_lane(vacc${M}x${ABC[0:4]}, 0);
+
+ $for M in reversed(range(MR)):
+ vacc${M}x${ABC[0:4]} = wasm_v32x4_shuffle(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]}, 2, 3, 2, 3);
+
+ $for M in reversed(range(MR)):
+ c${M} += 2;
+ $elif LOG2N == 0:
+ $for M in reversed(range(MR)):
+ *c${M} = wasm_f32x4_extract_lane(vacc${M}x${ABC[0:4]}, 0);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}