// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert REQUANTIZATION == "FP32"
$assert BATCH_TILE >= 1

#include <assert.h>

#include <fp16.h>

#include <xnnpack/math.h>
#include <xnnpack/vmul.h>

$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
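
// Element-wise multiply microkernel, scalar implementation: multiplies two
// quantized vectors with fp32 requantization and min/max output clamping.
// BATCH_TILE controls how many elements each main-loop iteration processes.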
void xnn_${DATATYPE.lower()}_vmul_minmax_${REQUANTIZATION.lower()}_ukernel__scalar_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* input_a,
    const ${XINT8_T}* input_b,
    ${XINT8_T}* output,
    const union xnn_${DATATYPE.lower()}_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
  const int32_t vb_zero_point = params->fp32_scalar.b_zero_point;
  const float vscale = params->fp32_scalar.scale;
  const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
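
  // Per-element pipeline: subtract the zero points, multiply in int32, scale
  // the product in fp32, clamp against the output range (both bounds are
  // precomputed relative to the output zero point), then add the magic bias so
  // the rounded result lands in the low bits of the float representation;
  // subtracting magic_bias_less_output_zero_point recovers that integer and
  // folds in the output zero point. The magic-bias constants are set up in the
  // params initializer.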
  $if BATCH_TILE == 1:
    do {
      const int32_t va = (int32_t) *input_a++ - va_zero_point;
      const int32_t vb = (int32_t) *input_b++ - vb_zero_point;
      const int32_t vacc = va * vb;
      float vfpacc = (float) vacc * vscale;
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      const int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
      *output++ = (${XINT8_T}) vout;
      n -= sizeof(${XINT8_T});
    } while (n != 0);
  $else:
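    // Main loop: process BATCH_TILE elements per iteration with the same
    // scalar arithmetic, unrolled by the code generator.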
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      $for N in range(BATCH_TILE):
        const int32_t va${N} = input_a[${N}] - va_zero_point;
      input_a += ${BATCH_TILE};
      $for N in range(BATCH_TILE):
        const int32_t vb${N} = input_b[${N}] - vb_zero_point;
      input_b += ${BATCH_TILE};
      $for N in range(BATCH_TILE):
        const int32_t vacc${N} = va${N} * vb${N};
      $for N in range(BATCH_TILE):
        float vfpacc${N} = (float) vacc${N} * vscale;
      $for N in range(BATCH_TILE):
        vfpacc${N} = math_max_f32(vfpacc${N}, voutput_min_less_zero_point);
      $for N in range(BATCH_TILE):
        vfpacc${N} = math_min_f32(vfpacc${N}, voutput_max_less_zero_point);
      $for N in range(BATCH_TILE):
        vfpacc${N} += vmagic_bias;
      $for N in range(BATCH_TILE):
        const int32_t vout${N} = (int32_t) fp32_to_bits(vfpacc${N}) - vmagic_bias_less_output_zero_point;
      $for N in range(BATCH_TILE):
        output[${N}] = (${XINT8_T}) vout${N};
      output += ${BATCH_TILE};
    }
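    // Remainder: handle the final elements that do not fill a whole batch tile.
    // For BATCH_TILE == 2 at most one element can be left, so no loop is needed;
    // for larger tiles the remainder is processed one element at a time.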
    if XNN_UNLIKELY(n != 0) {
      $if BATCH_TILE == 2:
        const int32_t va = (int32_t) *input_a - va_zero_point;
        const int32_t vb = (int32_t) *input_b - vb_zero_point;
        const int32_t vacc = va * vb;
        float vfpacc = (float) vacc * vscale;
        vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
        vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
        vfpacc += vmagic_bias;
        const int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
        *output = (${XINT8_T}) vout;
      $else:
        do {
          const int32_t va = (int32_t) *input_a++ - va_zero_point;
          const int32_t vb = (int32_t) *input_b++ - vb_zero_point;
          const int32_t vacc = va * vb;
          float vfpacc = (float) vacc * vscale;
          vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
          vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
          vfpacc += vmagic_bias;
          const int32_t vout = (int32_t) fp32_to_bits(vfpacc) - vmagic_bias_less_output_zero_point;
          *output++ = (${XINT8_T}) vout;
          n -= sizeof(${XINT8_T});
        } while (n != 0);
    }
}