// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR == 2
$assert MR % 2 == 0
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>

$assert ACTIVATION in ["LINEAR", "RELU", "MINMAX"]
$ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
$ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
$PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
void xnn_f32_gemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}c4__wasmsimd${ARCH_SUFFIX}(
    size_t mr,
    size_t nc,
    size_t kc,
    const float* restrict a,
    size_t a_stride,
    const float* restrict w,
    float* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
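
  // One A (input) and one C (output) pointer per row of the ${MR}-row tile.
  // Rows past the end of the batch (mr < ${MR}) alias the previous row's
  // pointers, so they are computed redundantly but never touch memory outside
  // the caller's buffers.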
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  $if ACTIVATION == "MINMAX" and not X86:
    const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
    const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
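
  // Outer loop over output columns: each iteration computes an ${MR}x${NR}
  // block of C from ${NR} bias values followed by the packed weights.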
  do {
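    // Initialize the accumulators: lane 0 of each vacc*c4 vector is seeded
    // with the bias for its output column; lanes 1-3 start at zero and gather
    // the remaining partial sums of the c4 (4-channels-per-step) layout.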
    v128_t vacc0x0c4 = wasm_f32x4_replace_lane(wasm_f32x4_const_splat(0.0f), 0, w[0]);
    $for N in range(1, NR):
      v128_t vacc0x${N}c4 = wasm_f32x4_replace_lane(vacc0x0c4, 0, w[${N}]);
    $for M in range(1, MR):
      $for N in range(NR):
        v128_t vacc${M}x${N}c4 = vacc0x${N}c4;
    w += ${NR};

    size_t k = kc;
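
    // Main loop: process k in steps of 4 floats. Each row loads 4 consecutive
    // A values and multiplies them against 4 packed weights per output column,
    // accumulating 4 independent partial dot products per accumulator.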
    for (; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
      $for M in range(MR):
        const v128_t va${M} = wasm_v128_load(a${M});
        a${M} += 4;

      const v128_t vb0 = wasm_v128_load(w);
      $for N in range(1, NR):
        const v128_t vb${N} = wasm_v128_load(w + ${N * 4});
      w += ${NR * 4};

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = wasm_f32x4_add(vacc${M}x${N}c4, wasm_f32x4_mul(va${M}, vb${N}));
    }
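    // Remainder for k % 4 != 0: the A loads may read past the end of the row
    // (hence XNN_OOB_READS on the kernel), while the packed weights are
    // zero-padded, so lanes where vb == 0.0f are cleared from va via andnot
    // before the multiply-add to keep garbage lanes out of the sum.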
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        const v128_t va${M} = wasm_v128_load(a${M});
        a${M} = (const float*) ((uintptr_t) a${M} + k);

      const v128_t vb0 = wasm_v128_load(w);
      $for N in range(1, NR):
        const v128_t vb${N} = wasm_v128_load(w + ${N * 4});
      w += ${NR * 4};

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      $for N in range(NR):
        const v128_t vmask${N} = wasm_f32x4_eq(vb${N}, vzero);

      $for M in range(MR):
        $for N in range(NR):
          vacc${M}x${N}c4 = wasm_f32x4_add(vacc${M}x${N}c4, wasm_f32x4_mul(wasm_v128_andnot(va${M}, vmask${N}), vb${N}));
    }
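
    // Horizontal reduction: a first shuffle+add folds each row's two c4
    // accumulators into one vector of 2 partial sums per output (vaccMx01c2),
    // then a second shuffle+add folds those and packs two adjacent rows into
    // one vector holding the finished 2x2 block [cM[0], cM[1], cM+1[0], cM+1[1]].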
    $for M in range(MR):
      const v128_t vacc${M}x01c2 = wasm_f32x4_add(
        wasm_v32x4_shuffle(vacc${M}x0c4, vacc${M}x1c4, 0, 4, 1, 5),
        wasm_v32x4_shuffle(vacc${M}x0c4, vacc${M}x1c4, 2, 6, 3, 7));

    $for M in range(0, MR, 2):
      v128_t vacc${M}${M+1}x01 = wasm_f32x4_add(
        wasm_v32x4_shuffle(vacc${M}x01c2, vacc${M+1}x01c2, 0, 1, 4, 5),
        wasm_v32x4_shuffle(vacc${M}x01c2, vacc${M+1}x01c2, 2, 3, 6, 7));
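
    // Apply the activation to the packed 2x2 result blocks. The x86 variant
    // uses pmin/pmax, which lower to single SSE minps/maxps instructions; the
    // ARM variant uses IEEE-style min/max with vmin/vmax hoisted out of the loop.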
    $if ACTIVATION == "MINMAX":
      $if X86:
        const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_f32x4_pmax(vmin, vacc${M}${M+1}x01);

        const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_f32x4_pmin(vmax, vacc${M}${M+1}x01);
      $else:
        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_f32x4_max(vacc${M}${M+1}x01, vmin);

        $for M in range(0, MR, 2):
          vacc${M}${M+1}x01 = wasm_f32x4_min(vacc${M}${M+1}x01, vmax);
    $elif ACTIVATION == "RELU":
      const v128_t vzero = wasm_i32x4_const_splat(0);
      $for M in range(0, MR, 2):
        vacc${M}${M+1}x01 = wasm_i32x4_max(vacc${M}${M+1}x01, vzero);
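
    // Store: with a full column pair (nc >= ${NR}) each row's 2 outputs are
    // written with a single 64-bit store and the pointers advance to the next
    // block; otherwise nc == 1 and only the first column's lane is stored.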
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(0, MR, 2)):
        *((double*) c${M}) = wasm_f64x2_extract_lane(vacc${M}${M+1}x01, 0);
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);
        a${M} = (const float*) ((uintptr_t) a${M} - kc);
        *((double*) c${M+1}) = wasm_f64x2_extract_lane(vacc${M}${M+1}x01, 1);
        c${M+1} = (float*) ((uintptr_t) c${M+1} + cn_stride);
        a${M+1} = (const float*) ((uintptr_t) a${M+1} - kc);

      nc -= ${NR};
    } else {
      assert(nc == 1);
      $for M in reversed(range(0, MR, 2)):
        *c${M} = wasm_f32x4_extract_lane(vacc${M}${M+1}x01, 0);
        *c${M+1} = wasm_f32x4_extract_lane(vacc${M}${M+1}x01, 2);

      nc = 0;
    }
  } while (nc != 0);
}