// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx8c8-avx2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
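
// QU8 (unsigned 8-bit) indirect GEMM microkernel with a 1x8 output tile:
// one row (MR=1) and 8 output channels (NR=8) per invocation, 8-byte channel
// blocking (C8), AVX2 intrinsics, fp32-based requantization, and min/max
// output clamping.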
void xnn_qu8_igemm_minmax_fp32_ukernel_1x8c8__avx2(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
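
  // Packed weights are padded to the channel-block size, so round kc up to a
  // multiple of 8 to match the packed layout.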
kc = round_up_po2(kc, 8);
uint8_t* c0 = c;
do {
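    // The packed weight stream starts with 8 int32 biases, one per output
    // channel. Pairs of biases are spread across the low and high 128-bit
    // lanes of four 256-bit accumulators: vacc0x01 holds channels (0, 1),
    // vacc0x23 holds (2, 3), and so on. _mm_loadu_si32 zero-fills the upper
    // elements, which are collapsed by the horizontal reduction later.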
const __m128i vbias0x0 = _mm_loadu_si32(w);
const __m128i vbias0x1 = _mm_loadu_si32((const int32_t*) w + 1);
__m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1);
const __m128i vbias0x2 = _mm_loadu_si32((const int32_t*) w + 2);
const __m128i vbias0x3 = _mm_loadu_si32((const int32_t*) w + 3);
__m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1);
const __m128i vbias0x4 = _mm_loadu_si32((const int32_t*) w + 4);
const __m128i vbias0x5 = _mm_loadu_si32((const int32_t*) w + 5);
__m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1);
const __m128i vbias0x6 = _mm_loadu_si32((const int32_t*) w + 6);
const __m128i vbias0x7 = _mm_loadu_si32((const int32_t*) w + 7);
__m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1);
w = (const void*) ((const int32_t*) w + 8);
size_t p = ks;
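    // The kernel (filter) zero point, replicated across all 16 int16 lanes of
    // the parameter structure, is subtracted from the weights after they are
    // widened to 16 bits below.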
const __m256i vb_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.kernel_zero_point);
do {
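      // Each iteration consumes one row pointer from the indirection buffer.
      // Pointers equal to `zero` reference the zero buffer and must not be
      // adjusted by a_offset.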
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
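
      // Main K loop: 8 input channels per iteration. The 8 input bytes are
      // broadcast into both 128-bit lanes and widened to 16 bits; each
      // _mm256_madd_epi16 then accumulates two output channels' worth of
      // products (one channel per lane) into int32 partial sums.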
while (k < kc) {
const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0));
const __m256i vxa0 = _mm256_cvtepu8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m256i vxb01 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb01), vb_zero_point);
vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m256i vxb23 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb23), vb_zero_point);
vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23));
const __m128i vb45 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 32));
const __m256i vxb45 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb45), vb_zero_point);
vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45));
const __m128i vb67 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 48));
const __m256i vxb67 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb67), vb_zero_point);
vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67));
w = (const void*) ((const uint8_t*) w + 64);
k += 8 * sizeof(uint8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
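
    // Reduce the accumulators: each holds two channels' partial sums spread
    // over 8 int32 lanes. Two rounds of per-lane horizontal adds leave the
    // channels in order 0,2,4,6,1,3,5,7; the permute restores 0..7.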
const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
__m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask);
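
    // fp32 requantization: convert the int32 sums to float, scale, clamp to
    // the output maximum (stored with the zero point pre-subtracted), and
    // convert back to int32 with rounding.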
__m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point);
vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567);
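
    // Narrow to int16 with signed saturation and add the output zero point.
    // The 4x64 permute gathers all 8 channels into the low 128-bit lane,
    // the unsigned pack narrows to uint8, and the output minimum is applied
    // with _mm256_max_epu8.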
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point);
__m256i vacc00x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc0x01234567), voutput_zero_point);
vacc00x01234567 = _mm256_permute4x64_epi64(vacc00x01234567, _MM_SHUFFLE(3, 1, 2, 0));
__m256i vout = _mm256_packus_epi16(vacc00x01234567, vacc00x01234567);
vout = _mm256_max_epu8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min));
__m128i vout_lo = _mm256_castsi256_si128(vout);
__m128i vout_hi = _mm256_extracti128_si256(vout, 1);
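
    // Write 8 output channels at once when nc allows; otherwise store the
    // remaining 4/2/1 bytes, shifting the vector down between stores. On the
    // full-width path, the indirection pointer a is rewound by ks so the same
    // row pointers are reused for the next 8-channel output tile.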
if (nc >= 8) {
_mm_storel_epi64((__m128i*) c0, vout_lo);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_si32(c0, vout_lo);
c0 += 4;
vout_lo = _mm_srli_epi64(vout_lo, 32);
vout_hi = _mm_srli_epi64(vout_hi, 32);
}
if (nc & 2) {
*((uint16_t*) c0) = (uint16_t) _mm_extract_epi16(vout_lo, 0);
c0 += 2;
vout_lo = _mm_srli_epi32(vout_lo, 16);
vout_hi = _mm_srli_epi32(vout_hi, 16);
}
if (nc & 1) {
*c0 = (uint8_t) _mm_extract_epi8(vout_lo, 0);
}
nc = 0;
}
} while (nc != 0);
}