// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert NR % 8 == 0
$assert 8 <= NR <= 16
$assert REQUANTIZATION in ["FP32", "RNDNU"]
$assert not CHANNELWISE or REQUANTIZATION == "FP32"
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
$if REQUANTIZATION == "FP32" and ARMV8:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_UNION = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
$PARAMS_STRUCT = ("" if CHANNELWISE else REQUANTIZATION.lower() + "_") + ("neonv8" if ARMV8 and REQUANTIZATION == "FP32" else "neon")
$ISA = "neonv8" if ARMV8 else "neon"
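// IGEMM (indirect GEMM) microkernel template: computes an MR x NR block of
// int8 output from input rows gathered through an indirection buffer,
// accumulating 8 input channels at a time ("c8") with NEON MULL/MLAL.
// For example, with MR=2, NR=8, RNDNU requantization, and MLA enabled this
// expands to xnn_qs8_igemm_minmax_rndnu_ukernel_2x8c8__neon_mlal.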
void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x${NR}c8__${ISA}_${"mlal" if MLA else "mull"}(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= ${MR});
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (${MR} * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
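  // Round kc up to a multiple of 8 bytes: this c8 kernel always reads inputs
  // and packed weights in groups of 8 input channels.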
kc = round_up_po2(kc, 8 * sizeof(int8_t));
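  // Set up MR output row pointers; when mr < MR, the surplus pointers alias
  // an earlier row so their stores are redundant but safe.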
int8_t* c0 = c;
$for M in range(1, MR):
int8_t* c${M} = (int8_t*) ((uintptr_t) c${M-1} + cm_stride);
$if M % 2 == 0:
if XNN_UNPREDICTABLE(mr <= ${M}) {
c${M} = c${M-1};
}
$elif M + 1 == MR:
if XNN_UNPREDICTABLE(mr != ${M+1}) {
c${M} = c${M-1};
}
$else:
if XNN_UNPREDICTABLE(mr < ${M+1}) {
c${M} = c${M-1};
}
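
  // Main loop: process the output in blocks of NR columns.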
do {
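    // Initialize the accumulators with the NR int32 bias values that lead
    // each group of packed weights, then replicate them for the other rows.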
$for N in range(NR):
int32x4_t vacc0x${N} = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
$for M in range(1, MR):
$for N in range(NR):
int32x4_t vacc${M}x${N} = vacc0x${N};
size_t p = ks;
do {
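      // Gather MR input row pointers from the indirection buffer. Rows equal
      // to the kernel-wide zero pointer are padding and are not offset.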
$for M in range(MR):
const int8_t* restrict a${M} = a[${M}];
if XNN_UNPREDICTABLE(a${M} != zero) {
a${M} = (const int8_t*) ((uintptr_t) a${M} + a_offset);
}
a += ${MR};
size_t k = kc;
$if MLA:
// 2x partial unrolled loop to load 16 bytes at a time using MLA.
while (k >= 16 * sizeof(int8_t)) {
$for M in range(MR):
const int8x8_t va${M}x0 = vld1_s8(a${M}); a${M} += 8;
const int8x8_t va${M}x1 = vld1_s8(a${M}); a${M} += 8;
$for N in range(NR):
            const int8x8_t vb${N}x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
$for N in range(NR):
            const int8x8_t vb${N}x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
$for M in range(MR):
int16x8_t vprod${M}x${N} = vmull_s8(vb${N}x0, va${M}x0);
$for M in range(MR):
vprod${M}x${N} = vmlal_s8(vprod${M}x${N}, vb${N}x1, va${M}x1);
$for M in range(MR):
vacc${M}x${N} = vpadalq_s16(vacc${M}x${N}, vprod${M}x${N});
k -= 16 * sizeof(int8_t);
}
// Handle 8 bytes at a time using MUL.
${"if" if MLA else "while"} (k != 0) {
$for M in range(MR):
const int8x8_t va${M} = vld1_s8(a${M}); a${M} += 8;
$for N in range(NR):
const int8x8_t vb${N} = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
$for M in range(MR):
const int16x8_t vprod${M}x${N} = vmull_s8(vb${N}, va${M});
$for M in range(MR):
vacc${M}x${N} = vpadalq_s16(vacc${M}x${N}, vprod${M}x${N});
k -= 8 * sizeof(int8_t);
}
p -= ${MR} * sizeof(void*);
} while (p != 0);
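
    // Each accumulator vaccMxN holds 4 partial sums for one output element.
    // Reduce each to a single lane and pack 4 neighboring outputs into one
    // int32x4_t: AArch64 uses pairwise vpaddq_s32, AArch32 adds the 64-bit
    // halves and pairwise-adds with vpadd_s32.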
#if XNN_ARCH_ARM64
$for M in range(MR):
$for N in range(0, NR, 4):
const int32x4_t vsum${M}x${ABC[N:N+2]} = vpaddq_s32(vacc${M}x${N}, vacc${M}x${N+1});
const int32x4_t vsum${M}x${ABC[N+2:N+4]} = vpaddq_s32(vacc${M}x${N+2}, vacc${M}x${N+3});
$for M in range(MR):
$for N in range(0, NR, 4):
int32x4_t vacc${M}x${ABC[N:N+4]} = vpaddq_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#else
$for M in range(MR):
$for N in range(0, NR, 4):
const int32x2_t vpsum${M}x${ABC[N]} = vadd_s32(vget_low_s32(vacc${M}x${N}), vget_high_s32(vacc${M}x${N}));
const int32x2_t vpsum${M}x${ABC[N+1]} = vadd_s32(vget_low_s32(vacc${M}x${N+1}), vget_high_s32(vacc${M}x${N+1}));
const int32x2_t vpsum${M}x${ABC[N+2]} = vadd_s32(vget_low_s32(vacc${M}x${N+2}), vget_high_s32(vacc${M}x${N+2}));
const int32x2_t vpsum${M}x${ABC[N+3]} = vadd_s32(vget_low_s32(vacc${M}x${N+3}), vget_high_s32(vacc${M}x${N+3}));
const int32x2_t vsum${M}x${ABC[N:N+2]} = vpadd_s32(vpsum${M}x${ABC[N]}, vpsum${M}x${ABC[N+1]});
const int32x2_t vsum${M}x${ABC[N+2:N+4]} = vpadd_s32(vpsum${M}x${ABC[N+2]}, vpsum${M}x${ABC[N+3]});
        int32x4_t vacc${M}x${ABC[N:N+4]} = vcombine_s32(vsum${M}x${ABC[N:N+2]}, vsum${M}x${ABC[N+2:N+4]});
#endif
$if REQUANTIZATION == "RNDNU":
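      // Requantize: saturating pre-shift (vqshlq_s32), saturating doubling
      // multiply-high (vqdmulhq_s32), then rounding post-shift (vrshlq_s32)
      // down to the int8 output scale.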
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->${PARAMS_STRUCT}.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->${PARAMS_STRUCT}.right_post_shift);
$for M in range(MR):
$for N in range(0, NR, 4):
vacc${M}x${ABC[N:N+4]} = vqshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_pre_shift);
$for M in range(MR):
$for N in range(0, NR, 4):
vacc${M}x${ABC[N:N+4]} = vqdmulhq_s32(vacc${M}x${ABC[N:N+4]}, vmultiplier);
$for M in range(MR):
$for N in range(0, NR, 4):
vacc${M}x${ABC[N:N+4]} = vrshlq_s32(vacc${M}x${ABC[N:N+4]}, vright_post_shift);
$elif REQUANTIZATION == "FP32":
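      // Requantize in float: convert, multiply by the scale, and round back
      // to int32 with round-to-nearest-even: native vcvtnq_s32_f32 on ARMv8,
      // otherwise the magic-bias addition trick.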
$for M in range(MR):
$for N in range(0, NR, 4):
float32x4_t vfpacc${M}x${ABC[N:N+4]} = vcvtq_f32_s32(vacc${M}x${ABC[N:N+4]});
$if CHANNELWISE:
$for N in range(0, NR, 4):
const float32x4_t vscale${ABC[N:N+4]} = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
$for M in range(MR):
vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale${ABC[N:N+4]});
$else:
const float32x4_t vscale = vld1q_dup_f32(&params->${PARAMS_STRUCT}.scale);
$for M in range(MR):
$for N in range(0, NR, 4):
vfpacc${M}x${ABC[N:N+4]} = vmulq_f32(vfpacc${M}x${ABC[N:N+4]}, vscale);
$if ARMV8:
$for M in range(MR):
$for N in range(0, NR, 4):
vacc${M}x${ABC[N:N+4]} = vcvtnq_s32_f32(vfpacc${M}x${ABC[N:N+4]});
$else:
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
$for M in range(MR):
$for N in range(0, NR, 4):
vacc${M}x${ABC[N:N+4]} = vreinterpretq_s32_f32(vaddq_f32(vfpacc${M}x${ABC[N:N+4]}, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
$for M in range(MR):
$for N in range(0, NR, 4):
vacc${M}x${ABC[N:N+4]} = vqsubq_s32(vacc${M}x${ABC[N:N+4]}, vmagic_bias_less_output_zero_point);
$if REQUANTIZATION != "FP32" or ARMV8:
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->${PARAMS_STRUCT}.output_zero_point);
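    // Saturating narrow to int16, adding the output zero point where it has
    // not already been folded into the magic bias, then narrow again to int8.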
#if XNN_ARCH_ARM64
$for M in range(MR):
$for N in range(0, NR, 8):
int16x8_t vacc${M}x${ABC[N:N+8]} = vqmovn_high_s32(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vacc${M}x${ABC[N+4:N+8]});
$if REQUANTIZATION != "FP32" or ARMV8:
$for M in range(MR):
$for N in range(0, NR, 8):
vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);
$for M in range(MR):
$for N in range(0, NR, 16):
$if N + 8 < NR:
int8x16_t vout${M}x${ABC[N:N+16]} = vqmovn_high_s16(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vacc${M}x${ABC[N+8:N+16]});
$elif M % 2 == 1:
int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vqmovn_high_s16(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vacc${M}x${ABC[N:N+8]});
$elif M + 1 == MR:
int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#else
$for M in range(MR):
$for N in range(0, NR, 8):
int16x8_t vacc${M}x${ABC[N:N+8]} = vcombine_s16(vqmovn_s32(vacc${M}x${ABC[N:N+4]}), vqmovn_s32(vacc${M}x${ABC[N+4:N+8]}));
$if REQUANTIZATION != "FP32" or ARMV8:
$for M in range(MR):
$for N in range(0, NR, 8):
vacc${M}x${ABC[N:N+8]} = vqaddq_s16(vacc${M}x${ABC[N:N+8]}, voutput_zero_point);
$for M in range(MR):
$for N in range(0, NR, 16):
$if N + 8 < NR:
int8x16_t vout${M}x${ABC[N:N+16]} = vcombine_s8(vqmovn_s16(vacc${M}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N+8:N+16]}));
$elif M % 2 == 1:
int8x16_t vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vcombine_s8(vqmovn_s16(vacc${M-1}x${ABC[N:N+8]}), vqmovn_s16(vacc${M}x${ABC[N:N+8]}));
$elif M + 1 == MR:
int8x8_t vout${M}x${ABC[N:N+8]} = vqmovn_s16(vacc${M}x${ABC[N:N+8]});
#endif
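    // Clamp to the caller-provided [output_min, output_max] range.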
$if NR == 8 and MR == 1:
const int8x8_t voutput_min = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_min);
$else:
const int8x16_t voutput_min = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_min);
$for M in range(MR):
$for N in range(0, NR, 16):
$if N + 8 < NR:
vout${M}x${ABC[N:N+16]} = vmaxq_s8(vout${M}x${ABC[N:N+16]}, voutput_min);
$elif M % 2 == 1:
vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vmaxq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_min);
$elif M + 1 == MR:
$if NR == 8 and MR == 1:
vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, voutput_min);
$else:
vout${M}x${ABC[N:N+8]} = vmax_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_min));
$if NR == 8 and MR == 1:
const int8x8_t voutput_max = vld1_dup_s8(&params->${PARAMS_STRUCT}.output_max);
$else:
const int8x16_t voutput_max = vld1q_dup_s8(&params->${PARAMS_STRUCT}.output_max);
$for M in range(MR):
$for N in range(0, NR, 16):
$if N + 8 < NR:
vout${M}x${ABC[N:N+16]} = vminq_s8(vout${M}x${ABC[N:N+16]}, voutput_max);
$elif M % 2 == 1:
vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]} = vminq_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}, voutput_max);
$elif M + 1 == MR:
$if NR == 8 and MR == 1:
vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, voutput_max);
$else:
vout${M}x${ABC[N:N+8]} = vmin_s8(vout${M}x${ABC[N:N+8]}, vget_low_s8(voutput_max));
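    // Full tile: store all NR columns of each row, advance the column
    // pointers by cn_stride, and rewind the indirection buffer for the next
    // column block.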
if (nc >= ${NR}) {
$for M in reversed(range(MR)):
$for N in range(0, NR, 16):
$if N + 8 < NR:
vst1q_s8(c${M} + ${N}, vout${M}x${ABC[N:N+16]});
$elif M % 2 == 1:
vst1_s8(c${M} + ${N}, vget_high_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
vst1_s8(c${M-1} + ${N}, vget_low_s8(vout${M-1}x${ABC[N:N+8]}_${M}x${ABC[N:N+8]}));
$elif M + 1 == MR:
vst1_s8(c${M} + ${N}, vout${M}x${ABC[N:N+8]});
$for M in reversed(range(MR)):
c${M} = (int8_t*) ((uintptr_t) c${M} + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= ${NR};
} else {
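      // Partial tile: write the remaining nc (< NR) columns with
      // progressively narrower stores (8-, 4-, 2-, 1-byte), shifting
      // consumed lanes out with vext.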
$if NR == 16:
$for M in reversed(range(MR)):
$if M % 2 == 1:
int8x16_t vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_low_s8(vout${M-1}x0123456789ABCDEF), vget_low_s8(vout${M}x0123456789ABCDEF));
$elif M + 1 == MR:
int8x8_t vout${M}x01234567 = vget_low_s8(vout${M}x0123456789ABCDEF);
if (nc & 8) {
$for M in reversed(range(MR)):
$if M % 2 == 1:
vst1_s8(c${M}, vget_high_s8(vout${M-1}x01234567_${M}x01234567)); c${M} += 8;
vst1_s8(c${M-1}, vget_low_s8(vout${M-1}x01234567_${M}x01234567)); c${M-1} += 8;
$elif M + 1 == MR:
vst1_s8(c${M}, vout${M}x01234567); c${M} += 8;
$for M in reversed(range(MR)):
$if M % 2 == 1:
vout${M-1}x01234567_${M}x01234567 = vcombine_s8(vget_high_s8(vout${M-1}x0123456789ABCDEF), vget_high_s8(vout${M}x0123456789ABCDEF));
$elif M + 1 == MR:
vout${M}x01234567 = vget_high_s8(vout${M}x0123456789ABCDEF);
}
if (nc & 4) {
$for M in reversed(range(MR)):
$if M % 2 == 1:
vst1q_lane_u32((void*) c${M}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 2); c${M} += 4;
vst1q_lane_u32((void*) c${M-1}, vreinterpretq_u32_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 4;
$elif M + 1 == MR:
vst1_lane_u32((void*) c${M}, vreinterpret_u32_s8(vout${M}x01234567), 0); c${M} += 4;
$for M in reversed(range(MR)):
$if M % 2 == 1:
vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 4);
$elif M + 1 == MR:
vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 4);
}
if (nc & 2) {
$for M in reversed(range(MR)):
$if M % 2 == 1:
vst1q_lane_u16((void*) c${M}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 4); c${M} += 2;
vst1q_lane_u16((void*) c${M-1}, vreinterpretq_u16_s8(vout${M-1}x01234567_${M}x01234567), 0); c${M-1} += 2;
$elif M + 1 == MR:
vst1_lane_u16((void*) c${M}, vreinterpret_u16_s8(vout${M}x01234567), 0); c${M} += 2;
$for M in reversed(range(MR)):
$if M % 2 == 1:
vout${M-1}x01234567_${M}x01234567 = vextq_s8(vout${M-1}x01234567_${M}x01234567, vout${M-1}x01234567_${M}x01234567, 2);
$elif M + 1 == MR:
vout${M}x01234567 = vext_s8(vout${M}x01234567, vout${M}x01234567, 2);
}
if (nc & 1) {
$for M in reversed(range(MR)):
$if M % 2 == 1:
vst1q_lane_s8(c${M}, vout${M-1}x01234567_${M}x01234567, 8);
vst1q_lane_s8(c${M-1}, vout${M-1}x01234567_${M}x01234567, 0);
$elif M + 1 == MR:
vst1_lane_s8(c${M}, vout${M}x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}