// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert MR % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/spmm.h>


$ARCH_SUFFIX = "_x86" if X86 else "_arm"
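// SpMM (sparse-weights matrix times dense input) microkernel with min/max
// output clamping, templated for WebAssembly SIMD. MR is the tile of
// M-dimension elements processed per iteration, UNROLL the unroll factor of
// the accumulation loop, and X86 selects the clamping instruction sequence.
// The "pipelined" variant preloads the next weight and input vectors before
// they are consumed, so loads overlap the arithmetic.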
void xnn_f32_spmm_minmax_ukernel_${MR}x${NR}__wasmsimd${ARCH_SUFFIX}_pipelined${"_x" + str(UNROLL) if UNROLL > 1 else ""}(
    size_t mc,
    size_t nc,
    const float*restrict input,
    const float*restrict weights,
    const int32_t*restrict widx_dmap,
    const uint32_t*restrict nidx_nnzmap,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mc != 0);
  assert(mc % sizeof(float) == 0);
  assert(nc != 0);

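  // Output clamping bounds, broadcast across all SIMD lanes.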
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  size_t output_decrement = output_stride * nc - ${MR} * sizeof(float);
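  // Main loop: process the M dimension in tiles of ${MR} elements (mc counts bytes).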
  while XNN_LIKELY(mc >= ${MR} * sizeof(float)) {
    const float*restrict w = weights;
    const int32_t* dmap = widx_dmap;
    const uint32_t* nnzmap = nidx_nnzmap;
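    // Software pipelining: preload the first weight, input offset, and input
    // vectors so loads for the next step overlap the current step's arithmetic.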
    v128_t vw = wasm_v128_load32_splat(w); w += 1;
    intptr_t diff = *dmap++;
    $for M in range(0, MR, 4):
      v128_t vi${ABC[M:M+4]} = wasm_v128_load(input + ${M});
    size_t n = nc;
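    // Loop over the N dimension (output channels); nnzmap holds the nonzero
    // count per channel. The preloaded vw seeds the accumulators: each
    // channel's packed weights start with its bias in XNNPACK's sparse layout.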
    do {
      uint32_t nnz = *nnzmap++;
      $for M in range(0, MR, 4):
        v128_t vacc${ABC[M:M+4]} = vw;
      vw = wasm_v128_load32_splat(w); w += 1;
      $if UNROLL > 1:
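        // Unrolled accumulation: consume ${UNROLL} nonzeroes per iteration,
        // re-preloading diff, vw, and the vi vectors after each step.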
        for (; nnz >= ${UNROLL}; nnz -= ${UNROLL}) {
          $for K in range(0, UNROLL):
            $for M in range(0, MR, 4):
              vacc${ABC[M:M+4]} = wasm_f32x4_add(vacc${ABC[M:M+4]}, wasm_f32x4_mul(vi${ABC[M:M+4]}, vw));
            input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
            diff = *dmap++;
            vw = wasm_v128_load32_splat(w); w += 1;
            $for M in range(0, MR, 4):
              vi${ABC[M:M+4]} = wasm_v128_load(input + ${M});
        }
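      // Accumulate the remaining nonzeroes one at a time (all of them when
      // the kernel is not unrolled).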
      if XNN_LIKELY(nnz != 0) {
        do {
          $for M in range(0, MR, 4):
            vacc${ABC[M:M+4]} = wasm_f32x4_add(vacc${ABC[M:M+4]}, wasm_f32x4_mul(vi${ABC[M:M+4]}, vw));
          input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
          diff = *dmap++;
          vw = wasm_v128_load32_splat(w); w += 1;
          $for M in range(0, MR, 4):
            vi${ABC[M:M+4]} = wasm_v128_load(input + ${M});
        } while (--nnz != 0);
      }
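      // Clamp to [min, max]. On x86, wasm_f32x4_pmin/pmax lower to single
      // minps/maxps instructions, while wasm_f32x4_min/max is cheaper on ARM.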
      $if X86:
        $for M in range(0, MR, 4):
          v128_t vout${ABC[M:M+4]} = wasm_f32x4_pmin(vmax, vacc${ABC[M:M+4]});
        $for M in range(0, MR, 4):
          vout${ABC[M:M+4]} = wasm_f32x4_pmax(vmin, vout${ABC[M:M+4]});
      $else:
        $for M in range(0, MR, 4):
          v128_t vout${ABC[M:M+4]} = wasm_f32x4_min(vacc${ABC[M:M+4]}, vmax);
        $for M in range(0, MR, 4):
          vout${ABC[M:M+4]} = wasm_f32x4_max(vout${ABC[M:M+4]}, vmin);
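      // Store ${MR} outputs and step to the same tile in the next output channel.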
      wasm_v128_store(output, vout0123);
      $for M in range(4, MR, 4):
        wasm_v128_store(output + ${M}, vout${ABC[M:M+4]});
      output = (float*restrict) ((uintptr_t) output + output_stride);
    } while (--n != 0);
    output = (float*restrict) ((uintptr_t) output - output_decrement);
    input += ${MR};
    mc -= ${MR} * sizeof(float);
  }
  if XNN_UNLIKELY(mc != 0) {
    $for LOG2M in reversed(range((MR - 1).bit_length())):
      $SUBMR = 1 << LOG2M
      $if SUBMR * 2 >= MR:
        output_decrement += ${MR - SUBMR} * sizeof(float);
      $else:
        output_decrement += ${SUBMR} * sizeof(float);
      if (mc & (${SUBMR} * sizeof(float))) {
        const float*restrict w = weights;
        const int32_t* dmap = widx_dmap;
        const uint32_t* nnzmap = nidx_nnzmap;
        size_t n = nc;
        do {
          uint32_t nnz = *nnzmap++;
          $if SUBMR == 1:
            v128_t vacc0 = wasm_v128_load32_splat(w); w += 1;
          $elif SUBMR == 2:
            v128_t vacc01 = wasm_v128_load32_splat(w); w += 1;
          $else:
            v128_t vacc0123 = wasm_v128_load32_splat(w); w += 1;
          $for M in range(4, SUBMR, 4):
            v128_t vacc${ABC[M:M+4]} = vacc0123;
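          // Accumulate this channel's nonzero weights against the gathered
          // inputs; diff advances the input pointer, in bytes, to the element
          // needed by the next nonzero weight.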
          if XNN_LIKELY(nnz != 0) {
            do {
              const intptr_t diff = *dmap++;
              $if SUBMR >= 4:
                const v128_t vi0123 = wasm_v128_load(input);
              $elif SUBMR == 2:
                const v128_t vi01 = wasm_v128_load64_splat(input);
              $elif SUBMR == 1:
                const v128_t vi0 = wasm_v128_load32_splat(input);
              $for M in range(4, SUBMR, 4):
                const v128_t vi${ABC[M:M+4]} = wasm_v128_load(input + ${M});
              input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
              const v128_t vw = wasm_v128_load32_splat(w); w += 1;
              $if SUBMR == 1:
                vacc${ABC[0]} = wasm_f32x4_add(vacc${ABC[0]}, wasm_f32x4_mul(vi${ABC[0]}, vw));
              $else:
                $for M in range(0, SUBMR, 4):
                  vacc${ABC[M:min(M+4,SUBMR)]} = wasm_f32x4_add(vacc${ABC[M:min(M+4,SUBMR)]}, wasm_f32x4_mul(vi${ABC[M:min(M+4,SUBMR)]}, vw));
            } while (--nnz != 0);
          }
          $if SUBMR == 1:
            $if X86:
              v128_t vout${ABC[0]} = wasm_f32x4_pmin(vmax, vacc${ABC[0]});
              vout${ABC[0]} = wasm_f32x4_pmax(vmin, vout${ABC[0]});
            $else:
              v128_t vout${ABC[0]} = wasm_f32x4_min(vacc${ABC[0]}, vmax);
              vout${ABC[0]} = wasm_f32x4_max(vout${ABC[0]}, vmin);
          $else:
            $if X86:
              $for M in range(0, SUBMR, 4):
                v128_t vout${ABC[M:min(M+4,SUBMR)]} = wasm_f32x4_pmin(vmax, vacc${ABC[M:min(M+4,SUBMR)]});
              $for M in range(0, SUBMR, 4):
                vout${ABC[M:min(M+4,SUBMR)]} = wasm_f32x4_pmax(vmin, vout${ABC[M:min(M+4,SUBMR)]});
            $else:
              $for M in range(0, SUBMR, 4):
                v128_t vout${ABC[M:min(M+4,SUBMR)]} = wasm_f32x4_min(vacc${ABC[M:min(M+4,SUBMR)]}, vmax);
              $for M in range(0, SUBMR, 4):
                vout${ABC[M:min(M+4,SUBMR)]} = wasm_f32x4_max(vout${ABC[M:min(M+4,SUBMR)]}, vmin);
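          // Partial store: a full v128 store for SUBMR >= 4, a 64-bit store via
          // f64x2 lane extraction for SUBMR == 2, a scalar store for SUBMR == 1.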
          $if SUBMR >= 4:
            wasm_v128_store(output, vout0123);
          $elif SUBMR == 2:
            *((double*) output) = wasm_f64x2_extract_lane(vout01, 0);
          $elif SUBMR == 1:
            *output = wasm_f32x4_extract_lane(vout0, 0);
          $for M in range(4, SUBMR, 4):
            wasm_v128_store(output + ${M}, vout${ABC[M:M+4]});
          output = (float*restrict) ((uintptr_t) output + output_stride);
        } while (--n != 0);
        output = (float*restrict) ((uintptr_t) output - output_decrement);
        input += ${SUBMR};
      }
  }
}