// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/spmm.h>
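
// Sparse-matrix times dense-matrix multiplication (SpMM) micro-kernel with
// min/max output clamping: scalar variant with software-pipelined loads.
// MR and NR are template-substituted tile parameters; only MR affects this
// scalar body.
//
// Parameter notes (inferred from how the kernel uses them):
//   mc            - size of the dense (M) dimension to process, in bytes.
//   nc            - number of output channels to produce.
//   weights       - packed per-channel data: an initial value that seeds the
//                   accumulators (the channel bias) followed by the channel's
//                   nonzero weights.
//   widx_dmap     - signed byte offsets applied to the input pointer between
//                   consecutive nonzero weights.
//   nidx_nnzmap   - number of nonzero weights per output channel.
//   output_stride - byte distance between consecutive output channels.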
void xnn_f32_spmm_minmax_ukernel_${MR}x${NR}__scalar_pipelined(
size_t mc,
size_t nc,
const float*restrict input,
const float*restrict weights,
const int32_t*restrict widx_dmap,
const uint32_t*restrict nidx_nnzmap,
float*restrict output,
size_t output_stride,
const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mc != 0);
assert(mc % sizeof(float) == 0);
assert(nc != 0);
const float vmin = params->scalar.min;
const float vmax = params->scalar.max;
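  // output_decrement rewinds the output pointer from the end of one pass over
  // all nc channels back to the first channel of the next block of ${MR}
  // elements.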
size_t output_decrement = output_stride * nc - ${MR} * sizeof(float);
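  // Main loop: process the dense dimension in blocks of ${MR} elements
  // (mc counts bytes, hence the comparison against ${MR} * sizeof(float)).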
while XNN_LIKELY(mc >= ${MR} * sizeof(float)) {
const float*restrict w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
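    // Software pipelining: pre-load the first packed weight and the first
    // input-pointer delta so that each loop iteration consumes values loaded
    // in the previous one.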
float vw = *w++;
intptr_t diff = *dmap++;
$for M in range(MR):
float vi${ABC[M]} = input[${M}];
size_t n = nc;
do {
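      // One output channel per iteration: nnz is the number of nonzero
      // weights in this channel.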
uint32_t nnz = *nnzmap++;
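      // Seed all ${MR} accumulators with the pre-loaded value (the channel
      // bias in the packed weights), then pre-load the first nonzero weight.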
$for M in range(MR):
float vacc${ABC[M]} = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
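          // Multiply-accumulate all ${MR} inputs against the shared weight,
          // then advance the input pointer by the pre-loaded byte offset and
          // pre-load the next weight/offset pair.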
$for M in range(MR):
vacc${ABC[M]} += vi${ABC[M]} * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
$for M in range(MR):
vi${ABC[M]} = input[${M}];
} while (--nnz != 0);
}
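      // Clamp the accumulators to [vmin, vmax] before storing.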
$for M in range(MR):
float vout${ABC[M]} = math_min_f32(vacc${ABC[M]}, vmax);
$for M in range(MR):
vout${ABC[M]} = math_max_f32(vout${ABC[M]}, vmin);
$for M in range(MR):
output[${M}] = vout${ABC[M]};
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
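    // Rewind the output pointer to the next block of ${MR} elements of the
    // first channel and advance the input pointer to the next block of ${MR}
    // elements.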
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += ${MR};
mc -= ${MR} * sizeof(float);
}
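  // Remainder: handle fewer than ${MR} leftover elements by decomposing the
  // remainder into power-of-two sub-blocks, generated from largest to
  // smallest.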
if XNN_UNLIKELY(mc != 0) {
$for LOG2M in reversed(range((MR - 1).bit_length())):
$SUBMR = 1 << LOG2M
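      // Adjust output_decrement for the narrower sub-block: the decrement
      // grows by the difference between the previous block width and ${SUBMR}.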
$if SUBMR * 2 >= MR:
output_decrement += ${MR - SUBMR} * sizeof(float);
$else:
output_decrement += ${SUBMR} * sizeof(float);
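      // Process ${SUBMR} leftover elements if the corresponding bit of the
      // remaining byte count is set; the body mirrors the main loop with a
      // narrower tile.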
if (mc & (${SUBMR} * sizeof(float))) {
const float*restrict w = weights;
const int32_t* dmap = widx_dmap;
const uint32_t* nnzmap = nidx_nnzmap;
float vw = *w++;
intptr_t diff = *dmap++;
$for M in range(SUBMR):
float vi${ABC[M]} = input[${M}];
size_t n = nc;
do {
uint32_t nnz = *nnzmap++;
$for M in range(SUBMR):
float vacc${ABC[M]} = vw;
vw = *w++;
if XNN_LIKELY(nnz != 0) {
do {
$for M in range(SUBMR):
vacc${ABC[M]} += vi${ABC[M]} * vw;
input = (const float*restrict) ((uintptr_t) input + (uintptr_t) diff);
diff = *dmap++;
vw = *w++;
$for M in range(SUBMR):
vi${ABC[M]} = input[${M}];
} while (--nnz != 0);
}
$for M in range(SUBMR):
float vout${ABC[M]} = math_min_f32(vacc${ABC[M]}, vmax);
$for M in range(SUBMR):
vout${ABC[M]} = math_max_f32(vout${ABC[M]}, vmin);
$for M in range(SUBMR):
output[${M}] = vout${ABC[M]};
output = (float*restrict) ((uintptr_t) output + output_stride);
} while (--n != 0);
output = (float*restrict) ((uintptr_t) output - output_decrement);
input += ${SUBMR};
}
}
}