QU8 DWCONV microkernels for AVX512
PiperOrigin-RevId: 383799081
diff --git a/BUILD.bazel b/BUILD.bazel
index 91542cf..772eb3d 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -4305,6 +4305,10 @@
"src/qs8-igemm/gen/3x16c8-minmax-gemmlowp-avx512skx.c",
"src/qs8-igemm/gen/4x16c8-minmax-fp32-avx512skx.c",
"src/qs8-igemm/gen/4x16c8-minmax-gemmlowp-avx512skx.c",
+ "src/qu8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c",
+ "src/qu8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c",
+ "src/qu8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c",
+ "src/qu8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c",
"src/qu8-gemm/gen/1x16c8-minmax-fp32-avx512skx.c",
"src/qu8-gemm/gen/2x16c8-minmax-fp32-avx512skx.c",
"src/qu8-gemm/gen/3x16c8-minmax-fp32-avx512skx.c",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a94f20c..d5c52b9 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3499,6 +3499,10 @@
src/qs8-igemm/gen/3x16c8-minmax-gemmlowp-avx512skx.c
src/qs8-igemm/gen/4x16c8-minmax-fp32-avx512skx.c
src/qs8-igemm/gen/4x16c8-minmax-gemmlowp-avx512skx.c
+ src/qu8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c
+ src/qu8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c
+ src/qu8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c
+ src/qu8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c
src/qu8-gemm/gen/1x16c8-minmax-fp32-avx512skx.c
src/qu8-gemm/gen/2x16c8-minmax-fp32-avx512skx.c
src/qu8-gemm/gen/3x16c8-minmax-fp32-avx512skx.c
diff --git a/scripts/generate-qs8-dwconv.sh b/scripts/generate-qs8-dwconv.sh
index f8bcd61..5093906 100755
--- a/scripts/generate-qs8-dwconv.sh
+++ b/scripts/generate-qs8-dwconv.sh
@@ -371,20 +371,26 @@
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D DATATYPE=QS8 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx512skx-mul32.c
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D DATATYPE=QS8 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx512skx-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D DATATYPE=QC8 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D DATATYPE=QC8 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c
+
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D DATATYPE=QS8 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D DATATYPE=QS8 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D DATATYPE=QC8 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D DATATYPE=QC8 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D DATATYPE=QU8 -D REQUANTIZATION=FP32 -o src/qu8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D DATATYPE=QU8 -D REQUANTIZATION=FP32 -o src/qu8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D DATATYPE=QS8 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx512skx-mul32.c
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D DATATYPE=QS8 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx512skx-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D DATATYPE=QC8 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D DATATYPE=QC8 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c
+
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D DATATYPE=QS8 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D DATATYPE=QS8 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D DATATYPE=QC8 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D DATATYPE=QC8 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D DATATYPE=QU8 -D REQUANTIZATION=FP32 -o src/qu8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D DATATYPE=QU8 -D REQUANTIZATION=FP32 -o src/qu8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c
################################## Unit tests #################################
tools/generate-dwconv-test.py --spec test/qs8-dwconv-minmax-gemmlowp.yaml --output test/qs8-dwconv-minmax-gemmlowp.cc
diff --git a/src/qs8-dwconv/unipass-avx512skx-mul32.c.in b/src/qs8-dwconv/unipass-avx512skx-mul32.c.in
index 5d45f17..0f4d614 100644
--- a/src/qs8-dwconv/unipass-avx512skx-mul32.c.in
+++ b/src/qs8-dwconv/unipass-avx512skx-mul32.c.in
@@ -5,7 +5,7 @@
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert REQUANTIZATION in ["GEMMLOWP", "FP32"]
-$assert DATATYPE in ["QC8", "QS8"]
+$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert DATATYPE != "QC8" or REQUANTIZATION == "FP32"
$assert CHANNEL_TILE % 16 == 0
$assert CHANNEL_TILE >= 16
@@ -19,17 +19,25 @@
$PARAMS_STRUCT = "avx512" if DATATYPE == "QC8" else REQUANTIZATION.lower() + "_avx512"
-$PARAMS_UNION = "xnn_qs8_minmax_params" if DATATYPE == "QC8" else "xnn_qs8_conv_minmax_params"
+$PARAMS_UNION = "xnn_qs8_minmax_params" if DATATYPE == "QC8" else "xnn_%s_conv_minmax_params" % DATATYPE.lower()
+$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
+$_MM512_CVTEPX8_EPI32 = "_mm512_cvtepu8_epi32" if DATATYPE == "QU8" else "_mm512_cvtepi8_epi32"
+$_MM256_PACKXS_EPI16 = "_mm256_packus_epi16" if DATATYPE == "QU8" else "_mm256_packs_epi16"
+$_MM_PACKXS_EPI16 = "_mm_packus_epi16" if DATATYPE == "QU8" else "_mm_packs_epi16"
+$_MM256_MIN_EPX8 = "_mm256_min_epu8" if DATATYPE == "QU8" else "_mm256_min_epi8"
+$_MM256_MAX_EPX8 = "_mm256_max_epu8" if DATATYPE == "QU8" else "_mm256_max_epi8"
+$_MM_MIN_EPX8 = "_mm_min_epu8" if DATATYPE == "QU8" else "_mm_min_epi8"
+$_MM_MAX_EPX8 = "_mm_max_epu8" if DATATYPE == "QU8" else "_mm_max_epi8"
void xnn_${DATATYPE.lower()}_dwconv_minmax_${REQUANTIZATION.lower()}_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__avx512skx_mul32(
size_t channels,
size_t output_width,
- const int8_t** input,
+ const ${XINT8_T}** input,
const void* weights,
- int8_t* output,
+ ${XINT8_T}* output,
size_t input_stride,
size_t output_increment,
size_t input_offset,
- const int8_t* zero,
+ const ${XINT8_T}* zero,
const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
assert(channels != 0);
@@ -55,14 +63,16 @@
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);
+ $if DATATYPE == "QU8":
+ const __m512i vk_zero_point = _mm512_cvtepu16_epi32(_mm256_load_si256((const __m256i*) params->${PARAMS_STRUCT}.kernel_zero_point));
do {
$for K in range(KERNEL_TILE):
- const int8_t* i${K} = input[${K}];
+ const ${XINT8_T}* i${K} = input[${K}];
assert(i${K} != NULL);
if XNN_UNPREDICTABLE(i${K} != zero) {
- i${K} = (const int8_t*) ((uintptr_t) i${K} + input_offset);
+ i${K} = (const ${XINT8_T}*) ((uintptr_t) i${K} + input_offset);
}
- input = (const int8_t**) ((uintptr_t) input + input_stride);
+ input = (const ${XINT8_T}**) ((uintptr_t) input + input_stride);
size_t c = channels;
const void* w = weights;
@@ -75,16 +85,19 @@
$for C in range(0, CHANNEL_TILE, 16):
$if C == 0:
- const __m512i vi${K}x${ABC[0:16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i${K}));
+ const __m512i vi${K}x${ABC[0:16]} = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) i${K}));
$else:
- const __m512i vi${K}x${ABC[C:C+16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (i${K} + ${C})));
- const __m512i vk${K}x${ABC[C:C+16]} = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE + C} * sizeof(int8_t))));
+ const __m512i vi${K}x${ABC[C:C+16]} = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) (i${K} + ${C})));
+ $if DATATYPE == "QU8":
+ const __m512i vk${K}x${ABC[C:C+16]} = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE + C} * sizeof(${XINT8_T})))), vk_zero_point);
+ $else:
+ const __m512i vk${K}x${ABC[C:C+16]} = _mm512_cvtepi8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE + C} * sizeof(${XINT8_T}))));
i${K} += ${CHANNEL_TILE};
$for C in range(0, CHANNEL_TILE, 16):
vacc${ABC[C:C+16]} = _mm512_add_epi32(vacc${ABC[C:C+16]}, _mm512_mullo_epi32(vi${K}x${ABC[C:C+16]}, vk${K}x${ABC[C:C+16]}));
- w = (const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(int8_t));
+ w = (const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(${XINT8_T}));
$if REQUANTIZATION == "GEMMLOWP":
$for C in range(0, CHANNEL_TILE, 16):
@@ -141,23 +154,23 @@
$if C + 16 < CHANNEL_TILE:
const __m256i vout${ABC[C:C+4]}${ABC[C+16:C+20]}${ABC[C+4:C+8]}${ABC[C+20:C+24]} = _mm512_castsi512_si256(vout${ABC[C:C+4]}${ABC[C+16:C+20]}${ABC[C+4:C+8]}${ABC[C+20:C+24]}${ABC[C+8:C+12]}${ABC[C+24:C+28]}${ABC[C+12:C+16]}${ABC[C+28:C+32]});
const __m256i vout${ABC[C+8:C+12]}${ABC[C+24:C+28]}${ABC[C+12:C+16]}${ABC[C+28:C+32]} = _mm512_extracti32x8_epi32(vout${ABC[C:C+4]}${ABC[C+16:C+20]}${ABC[C+4:C+8]}${ABC[C+20:C+24]}${ABC[C+8:C+12]}${ABC[C+24:C+28]}${ABC[C+12:C+16]}${ABC[C+28:C+32]}, 1);
- const __m256i vout${ABC[C:C+4]}${ABC[C+16:C+20]}${ABC[C+8:C+12]}${ABC[C+24:C+28]}${ABC[C+4:C+8]}${ABC[C+20:C+24]}${ABC[C+12:C+16]}${ABC[C+28:C+32]} = _mm256_packs_epi16(vout${ABC[C:C+4]}${ABC[C+16:C+20]}${ABC[C+4:C+8]}${ABC[C+20:C+24]}, vout${ABC[C+8:C+12]}${ABC[C+24:C+28]}${ABC[C+12:C+16]}${ABC[C+28:C+32]});
+ const __m256i vout${ABC[C:C+4]}${ABC[C+16:C+20]}${ABC[C+8:C+12]}${ABC[C+24:C+28]}${ABC[C+4:C+8]}${ABC[C+20:C+24]}${ABC[C+12:C+16]}${ABC[C+28:C+32]} = ${_MM256_PACKXS_EPI16}(vout${ABC[C:C+4]}${ABC[C+16:C+20]}${ABC[C+4:C+8]}${ABC[C+20:C+24]}, vout${ABC[C+8:C+12]}${ABC[C+24:C+28]}${ABC[C+12:C+16]}${ABC[C+28:C+32]});
__m256i vout${ABC[C:C+32]} = _mm256_permutevar8x32_epi32(vout${ABC[C:C+4]}${ABC[C+16:C+20]}${ABC[C+8:C+12]}${ABC[C+24:C+28]}${ABC[C+4:C+8]}${ABC[C+20:C+24]}${ABC[C+12:C+16]}${ABC[C+28:C+32]}, vpermute_mask);
$else:
const __m128i vout${ABC[C:C+4]}${ABC[C+8:C+12]} = _mm256_castsi256_si128(vout${ABC[C:C+4]}${ABC[C+8:C+12]}${ABC[C+4:C+8]}${ABC[C+12:C+16]});
const __m128i vout${ABC[C+4:C+8]}${ABC[C+12:C+16]} = _mm256_extracti128_si256(vout${ABC[C:C+4]}${ABC[C+8:C+12]}${ABC[C+4:C+8]}${ABC[C+12:C+16]}, 1);
- __m128i vout${ABC[C:C+16]} = _mm_shuffle_epi32(_mm_packs_epi16(vout${ABC[C:C+4]}${ABC[C+8:C+12]}, vout${ABC[C+4:C+8]}${ABC[C+12:C+16]}), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i vout${ABC[C:C+16]} = _mm_shuffle_epi32(${_MM_PACKXS_EPI16}(vout${ABC[C:C+4]}${ABC[C+8:C+12]}, vout${ABC[C+4:C+8]}${ABC[C+12:C+16]}), _MM_SHUFFLE(3, 1, 2, 0));
$for C in range(0, CHANNEL_TILE, 16):
$if C + 16 < CHANNEL_TILE:
- vout${ABC[C:C+32]} = _mm256_max_epi8(vout${ABC[C:C+32]}, voutput_min);
- vout${ABC[C:C+32]} = _mm256_min_epi8(vout${ABC[C:C+32]}, voutput_max);
+ vout${ABC[C:C+32]} = ${_MM256_MAX_EPX8}(vout${ABC[C:C+32]}, voutput_min);
+ vout${ABC[C:C+32]} = ${_MM256_MIN_EPX8}(vout${ABC[C:C+32]}, voutput_max);
$elif CHANNEL_TILE > 16:
- vout${ABC[C:C+16]} = _mm_max_epi8(vout${ABC[C:C+16]}, _mm256_castsi256_si128(voutput_min));
- vout${ABC[C:C+16]} = _mm_min_epi8(vout${ABC[C:C+16]}, _mm256_castsi256_si128(voutput_max));
+ vout${ABC[C:C+16]} = ${_MM_MAX_EPX8}(vout${ABC[C:C+16]}, _mm256_castsi256_si128(voutput_min));
+ vout${ABC[C:C+16]} = ${_MM_MIN_EPX8}(vout${ABC[C:C+16]}, _mm256_castsi256_si128(voutput_max));
$else:
- vout${ABC[C:C+16]} = _mm_max_epi8(vout${ABC[C:C+16]}, voutput_min);
- vout${ABC[C:C+16]} = _mm_min_epi8(vout${ABC[C:C+16]}, voutput_max);
+ vout${ABC[C:C+16]} = ${_MM_MAX_EPX8}(vout${ABC[C:C+16]}, voutput_min);
+ vout${ABC[C:C+16]} = ${_MM_MIN_EPX8}(vout${ABC[C:C+16]}, voutput_max);
$if CHANNEL_TILE > 16:
_mm256_storeu_si256((__m256i*) output, vout${ABC[0:32]});
@@ -174,20 +187,29 @@
// Prepare mask for valid 8-bit elements (depends on nc).
const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << (c & 15)) - UINT32_C(1)));
$if CHANNEL_TILE > 16:
- const int8_t* k = (const int8_t*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t));
+ const ${XINT8_T}* k = (const ${XINT8_T}*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t));
${"do " if CHANNEL_TILE > 16 else ""}{
__m512i vacc${ABC[0:16]} = _mm512_loadu_si512(w);
$for K in range(KERNEL_TILE):
- const __m512i vi${K}x${ABC[0:16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) i${K}));
- $if CHANNEL_TILE > 16:
- $if K == 0:
- const __m512i vk${K}x${ABC[0:16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) k));
+ const __m512i vi${K}x${ABC[0:16]} = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) i${K}));
+ $if DATATYPE == "QU8":
+ $if CHANNEL_TILE > 16:
+ $if K == 0:
+ const __m512i vk${K}x${ABC[0:16]} = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) k)), vk_zero_point);
+ $else:
+ const __m512i vk${K}x${ABC[0:16]} = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + ${K * CHANNEL_TILE}))), vk_zero_point);
$else:
- const __m512i vk${K}x${ABC[0:16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (k + ${K * CHANNEL_TILE})));
+ const __m512i vk${K}x${ABC[0:16]} = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE} * sizeof(${XINT8_T})))), vk_zero_point);
$else:
- const __m512i vk${K}x${ABC[0:16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE} * sizeof(int8_t))));
+ $if CHANNEL_TILE > 16:
+ $if K == 0:
+ const __m512i vk${K}x${ABC[0:16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) k));
+ $else:
+ const __m512i vk${K}x${ABC[0:16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) (k + ${K * CHANNEL_TILE})));
+ $else:
+ const __m512i vk${K}x${ABC[0:16]} = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE} * sizeof(${XINT8_T}))));
$if CHANNEL_TILE > 16:
i${K} += 16;
@@ -214,7 +236,7 @@
$elif REQUANTIZATION == "FP32":
__m512 vscaled${ABC[0:16]} = _mm512_cvtepi32_ps(vacc${ABC[0:16]});
$if DATATYPE == "QC8":
- const __m512 vscale${ABC[0:16]} = _mm512_loadu_ps((const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${CHANNEL_TILE * KERNEL_TILE} * sizeof(int8_t)));
+ const __m512 vscale${ABC[0:16]} = _mm512_loadu_ps((const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${CHANNEL_TILE * KERNEL_TILE} * sizeof(${XINT8_T})));
vscaled${ABC[0:16]} = _mm512_mul_ps(vscaled${ABC[0:16]}, vscale${ABC[0:16]});
$else:
vscaled${ABC[0:16]} = _mm512_mul_ps(vscaled${ABC[0:16]}, vscale);
@@ -230,13 +252,13 @@
const __m128i vout${ABC[0:4]}${ABC[8:12]} = _mm256_castsi256_si128(vout${ABC[0:4]}${ABC[8:12]}${ABC[4:8]}${ABC[12:16]});
const __m128i vout${ABC[4:8]}${ABC[12:16]} = _mm256_extracti128_si256(vout${ABC[0:4]}${ABC[8:12]}${ABC[4:8]}${ABC[12:16]}, 1);
- __m128i vout${ABC[0:16]} = _mm_shuffle_epi32(_mm_packs_epi16(vout${ABC[0:4]}${ABC[8:12]}, vout${ABC[4:8]}${ABC[12:16]}), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i vout${ABC[0:16]} = _mm_shuffle_epi32(${_MM_PACKXS_EPI16}(vout${ABC[0:4]}${ABC[8:12]}, vout${ABC[4:8]}${ABC[12:16]}), _MM_SHUFFLE(3, 1, 2, 0));
$if CHANNEL_TILE > 16:
- vout${ABC[0:16]} = _mm_max_epi8(vout${ABC[0:16]}, _mm256_castsi256_si128(voutput_min));
- vout${ABC[0:16]} = _mm_min_epi8(vout${ABC[0:16]}, _mm256_castsi256_si128(voutput_max));
+ vout${ABC[0:16]} = ${_MM_MAX_EPX8}(vout${ABC[0:16]}, _mm256_castsi256_si128(voutput_min));
+ vout${ABC[0:16]} = ${_MM_MIN_EPX8}(vout${ABC[0:16]}, _mm256_castsi256_si128(voutput_max));
$else:
- vout${ABC[0:16]} = _mm_max_epi8(vout${ABC[0:16]}, voutput_min);
- vout${ABC[0:16]} = _mm_min_epi8(vout${ABC[0:16]}, voutput_max);
+ vout${ABC[0:16]} = ${_MM_MAX_EPX8}(vout${ABC[0:16]}, voutput_min);
+ vout${ABC[0:16]} = ${_MM_MIN_EPX8}(vout${ABC[0:16]}, voutput_max);
$if CHANNEL_TILE > 16:
if XNN_LIKELY(c >= 16) {
@@ -245,15 +267,15 @@
c -= 16;
} else {
_mm_mask_storeu_epi8(output, vmask, vout${ABC[0:16]});
- output = (int8_t*) ((uintptr_t) output + c);
+ output = (${XINT8_T}*) ((uintptr_t) output + c);
c = 0;
}
$else:
_mm_mask_storeu_epi8(output, vmask, vout${ABC[0:16]});
- output = (int8_t*) ((uintptr_t) output + c);
+ output = (${XINT8_T}*) ((uintptr_t) output + c);
}${" while (c != 0);" if CHANNEL_TILE > 16 else ""}
}
- output = (int8_t*) ((uintptr_t) output + output_increment);
+ output = (${XINT8_T}*) ((uintptr_t) output + output_increment);
} while (--output_width != 0);
}
diff --git a/src/qu8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c b/src/qu8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c
new file mode 100644
index 0000000..2cedea0
--- /dev/null
+++ b/src/qu8-dwconv/gen/up16x25-minmax-fp32-avx512skx-mul32.c
@@ -0,0 +1,496 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx512skx-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/intrinsics-polyfill.h>
+
+
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32(
+ size_t channels,
+ size_t output_width,
+ const uint8_t** input,
+ const void* weights,
+ uint8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const uint8_t* zero,
+ const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ const __m512 vscale = _mm512_load_ps(params->fp32_avx512.scale);
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx512.output_zero_point);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx512.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_avx512.output_max);
+
+ const __m512i vk_zero_point = _mm512_cvtepu16_epi32(_mm256_load_si256((const __m256i*) params->fp32_avx512.kernel_zero_point));
+ do {
+ const uint8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const uint8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const uint8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const uint8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const uint8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const uint8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const uint8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const uint8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const uint8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ const uint8_t* i9 = input[9];
+ assert(i9 != NULL);
+ if XNN_UNPREDICTABLE(i9 != zero) {
+ i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
+ }
+ const uint8_t* i10 = input[10];
+ assert(i10 != NULL);
+ if XNN_UNPREDICTABLE(i10 != zero) {
+ i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
+ }
+ const uint8_t* i11 = input[11];
+ assert(i11 != NULL);
+ if XNN_UNPREDICTABLE(i11 != zero) {
+ i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
+ }
+ const uint8_t* i12 = input[12];
+ assert(i12 != NULL);
+ if XNN_UNPREDICTABLE(i12 != zero) {
+ i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
+ }
+ const uint8_t* i13 = input[13];
+ assert(i13 != NULL);
+ if XNN_UNPREDICTABLE(i13 != zero) {
+ i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
+ }
+ const uint8_t* i14 = input[14];
+ assert(i14 != NULL);
+ if XNN_UNPREDICTABLE(i14 != zero) {
+ i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
+ }
+ const uint8_t* i15 = input[15];
+ assert(i15 != NULL);
+ if XNN_UNPREDICTABLE(i15 != zero) {
+ i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
+ }
+ const uint8_t* i16 = input[16];
+ assert(i16 != NULL);
+ if XNN_UNPREDICTABLE(i16 != zero) {
+ i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
+ }
+ const uint8_t* i17 = input[17];
+ assert(i17 != NULL);
+ if XNN_UNPREDICTABLE(i17 != zero) {
+ i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
+ }
+ const uint8_t* i18 = input[18];
+ assert(i18 != NULL);
+ if XNN_UNPREDICTABLE(i18 != zero) {
+ i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
+ }
+ const uint8_t* i19 = input[19];
+ assert(i19 != NULL);
+ if XNN_UNPREDICTABLE(i19 != zero) {
+ i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
+ }
+ const uint8_t* i20 = input[20];
+ assert(i20 != NULL);
+ if XNN_UNPREDICTABLE(i20 != zero) {
+ i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
+ }
+ const uint8_t* i21 = input[21];
+ assert(i21 != NULL);
+ if XNN_UNPREDICTABLE(i21 != zero) {
+ i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
+ }
+ const uint8_t* i22 = input[22];
+ assert(i22 != NULL);
+ if XNN_UNPREDICTABLE(i22 != zero) {
+ i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
+ }
+ const uint8_t* i23 = input[23];
+ assert(i23 != NULL);
+ if XNN_UNPREDICTABLE(i23 != zero) {
+ i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
+ }
+ const uint8_t* i24 = input[24];
+ assert(i24 != NULL);
+ if XNN_UNPREDICTABLE(i24 != zero) {
+ i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
+ }
+ input = (const uint8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
+
+
+ const __m512i vi0x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i0));
+ const __m512i vk0x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(uint8_t)))), vk_zero_point);
+ i0 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
+
+ const __m512i vi1x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i1));
+ const __m512i vk1x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(uint8_t)))), vk_zero_point);
+ i1 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
+
+ const __m512i vi2x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i2));
+ const __m512i vk2x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(uint8_t)))), vk_zero_point);
+ i2 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
+
+ const __m512i vi3x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i3));
+ const __m512i vk3x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(uint8_t)))), vk_zero_point);
+ i3 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
+
+ const __m512i vi4x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i4));
+ const __m512i vk4x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(uint8_t)))), vk_zero_point);
+ i4 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
+
+ const __m512i vi5x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i5));
+ const __m512i vk5x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(uint8_t)))), vk_zero_point);
+ i5 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
+
+ const __m512i vi6x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i6));
+ const __m512i vk6x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(uint8_t)))), vk_zero_point);
+ i6 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
+
+ const __m512i vi7x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i7));
+ const __m512i vk7x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(uint8_t)))), vk_zero_point);
+ i7 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
+
+ const __m512i vi8x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i8));
+ const __m512i vk8x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(uint8_t)))), vk_zero_point);
+ i8 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
+
+ const __m512i vi9x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i9));
+ const __m512i vk9x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(uint8_t)))), vk_zero_point);
+ i9 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF));
+
+ const __m512i vi10x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i10));
+ const __m512i vk10x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(uint8_t)))), vk_zero_point);
+ i10 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF));
+
+ const __m512i vi11x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i11));
+ const __m512i vk11x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(uint8_t)))), vk_zero_point);
+ i11 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF));
+
+ const __m512i vi12x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i12));
+ const __m512i vk12x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(uint8_t)))), vk_zero_point);
+ i12 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF));
+
+ const __m512i vi13x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i13));
+ const __m512i vk13x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(uint8_t)))), vk_zero_point);
+ i13 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF));
+
+ const __m512i vi14x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i14));
+ const __m512i vk14x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(uint8_t)))), vk_zero_point);
+ i14 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF));
+
+ const __m512i vi15x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i15));
+ const __m512i vk15x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(uint8_t)))), vk_zero_point);
+ i15 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF));
+
+ const __m512i vi16x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i16));
+ const __m512i vk16x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(uint8_t)))), vk_zero_point);
+ i16 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF));
+
+ const __m512i vi17x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i17));
+ const __m512i vk17x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(uint8_t)))), vk_zero_point);
+ i17 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF));
+
+ const __m512i vi18x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i18));
+ const __m512i vk18x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(uint8_t)))), vk_zero_point);
+ i18 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF));
+
+ const __m512i vi19x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i19));
+ const __m512i vk19x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(uint8_t)))), vk_zero_point);
+ i19 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF));
+
+ const __m512i vi20x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i20));
+ const __m512i vk20x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(uint8_t)))), vk_zero_point);
+ i20 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF));
+
+ const __m512i vi21x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i21));
+ const __m512i vk21x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(uint8_t)))), vk_zero_point);
+ i21 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF));
+
+ const __m512i vi22x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i22));
+ const __m512i vk22x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(uint8_t)))), vk_zero_point);
+ i22 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF));
+
+ const __m512i vi23x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i23));
+ const __m512i vk23x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(uint8_t)))), vk_zero_point);
+ i23 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF));
+
+ const __m512i vi24x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i24));
+ const __m512i vk24x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(uint8_t)))), vk_zero_point);
+ i24 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF));
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(uint8_t));
+
+ __m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
+
+ vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale);
+
+ vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), voutput_zero_point);
+
+ const __m128i vout012389AB = _mm256_castsi256_si128(vout012389AB4567CDEF);
+ const __m128i vout4567CDEF = _mm256_extracti128_si256(vout012389AB4567CDEF, 1);
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packus_epi16(vout012389AB, vout4567CDEF), _MM_SHUFFLE(3, 1, 2, 0));
+
+ vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ // Prepare mask for valid 8-bit elements (depends on nc).
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << (c & 15)) - UINT32_C(1)));
+ {
+ __m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
+
+
+ const __m512i vi0x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i0));
+ const __m512i vk0x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
+
+ const __m512i vi1x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i1));
+ const __m512i vk1x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
+
+ const __m512i vi2x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i2));
+ const __m512i vk2x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
+
+ const __m512i vi3x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i3));
+ const __m512i vk3x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
+
+ const __m512i vi4x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i4));
+ const __m512i vk4x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
+
+ const __m512i vi5x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i5));
+ const __m512i vk5x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
+
+ const __m512i vi6x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i6));
+ const __m512i vk6x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
+
+ const __m512i vi7x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i7));
+ const __m512i vk7x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
+
+ const __m512i vi8x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i8));
+ const __m512i vk8x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
+
+ const __m512i vi9x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i9));
+ const __m512i vk9x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF));
+
+ const __m512i vi10x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i10));
+ const __m512i vk10x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF));
+
+ const __m512i vi11x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i11));
+ const __m512i vk11x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF));
+
+ const __m512i vi12x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i12));
+ const __m512i vk12x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF));
+
+ const __m512i vi13x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i13));
+ const __m512i vk13x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF));
+
+ const __m512i vi14x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i14));
+ const __m512i vk14x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF));
+
+ const __m512i vi15x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i15));
+ const __m512i vk15x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF));
+
+ const __m512i vi16x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i16));
+ const __m512i vk16x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF));
+
+ const __m512i vi17x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i17));
+ const __m512i vk17x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF));
+
+ const __m512i vi18x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i18));
+ const __m512i vk18x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF));
+
+ const __m512i vi19x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i19));
+ const __m512i vk19x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF));
+
+ const __m512i vi20x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i20));
+ const __m512i vk20x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF));
+
+ const __m512i vi21x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i21));
+ const __m512i vk21x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF));
+
+ const __m512i vi22x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i22));
+ const __m512i vk22x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF));
+
+ const __m512i vi23x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i23));
+ const __m512i vk23x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF));
+
+ const __m512i vi24x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i24));
+ const __m512i vk24x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(uint8_t)))), vk_zero_point);
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF));
+
+
+ __m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
+ vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale);
+ vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
+
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), voutput_zero_point);
+
+ const __m128i vout012389AB = _mm256_castsi256_si128(vout012389AB4567CDEF);
+ const __m128i vout4567CDEF = _mm256_extracti128_si256(vout012389AB4567CDEF, 1);
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packus_epi16(vout012389AB, vout4567CDEF), _MM_SHUFFLE(3, 1, 2, 0));
+ vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
+
+ _mm_mask_storeu_epi8(output, vmask, vout0123456789ABCDEF);
+ output = (uint8_t*) ((uintptr_t) output + c);
+ }
+ }
+
+ output = (uint8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qu8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c b/src/qu8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c
new file mode 100644
index 0000000..e58ce5f
--- /dev/null
+++ b/src/qu8-dwconv/gen/up16x9-minmax-fp32-avx512skx-mul32.c
@@ -0,0 +1,240 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx512skx-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/intrinsics-polyfill.h>
+
+
+// QU8 (unsigned 8-bit quantized) depthwise-convolution microkernel:
+// 9-tap kernel, 16 channels per main-loop iteration, FP32 requantization,
+// AVX512SKX with 32-bit multiplies.
+//
+// channels         - number of channels; processed 16 at a time, remainder handled
+//                    with a masked store.
+// output_width     - number of output pixels to produce.
+// input            - per output pixel, 9 input-row pointers (advanced by
+//                    input_stride bytes per pixel).
+// weights          - packed weights: per 16-channel group, 16 int32 biases
+//                    followed by 9 x 16 uint8 kernel taps (see offsets below).
+// zero             - sentinel pointer for padded rows; rows equal to it are NOT
+//                    adjusted by input_offset.
+// params           - requantization parameters (scale, zero points, clamp range).
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32(
+    size_t channels,
+    size_t output_width,
+    const uint8_t** input,
+    const void* weights,
+    uint8_t* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const uint8_t* zero,
+    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  // Requantization constants: FP32 multiplier, then output zero point and the
+  // saturating [output_min, output_max] clamp applied to the packed bytes.
+  const __m512 vscale = _mm512_load_ps(params->fp32_avx512.scale);
+  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx512.output_zero_point);
+  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx512.output_min);
+  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_avx512.output_max);
+
+  // Kernel zero point is stored as 16-bit lanes in params; widen to 32-bit so it
+  // can be subtracted from the widened kernel taps below.
+  const __m512i vk_zero_point = _mm512_cvtepu16_epi32(_mm256_load_si256((const __m256i*) params->fp32_avx512.kernel_zero_point));
+  do {
+    // Fetch the 9 input-row pointers for this output pixel. Rows that point at
+    // the zero buffer are padding and keep their address; real rows are shifted
+    // by input_offset.
+    const uint8_t* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
+    }
+    const uint8_t* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
+    }
+    const uint8_t* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
+    }
+    const uint8_t* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
+    }
+    const uint8_t* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
+    }
+    const uint8_t* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
+    }
+    const uint8_t* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
+    }
+    const uint8_t* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
+    }
+    const uint8_t* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
+    }
+    input = (const uint8_t**) ((uintptr_t) input + input_stride);
+
+    // Main loop: 16 channels per iteration.
+    size_t c = channels;
+    const void* w = weights;
+    for (; c >= 16; c -= 16) {
+      // Accumulator starts from the 16 int32 biases at the head of this
+      // weight group.
+      __m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
+
+
+      // Per tap k: widen 16 uint8 inputs and 16 uint8 kernel values to int32,
+      // subtract the kernel zero point, multiply, and accumulate. Kernel bytes
+      // for tap k live at w + 16*sizeof(int32_t) + k*16 (after the biases).
+      const __m512i vi0x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i0));
+      const __m512i vk0x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(uint8_t)))), vk_zero_point);
+      i0 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
+
+      const __m512i vi1x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i1));
+      const __m512i vk1x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(uint8_t)))), vk_zero_point);
+      i1 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
+
+      const __m512i vi2x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i2));
+      const __m512i vk2x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(uint8_t)))), vk_zero_point);
+      i2 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
+
+      const __m512i vi3x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i3));
+      const __m512i vk3x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(uint8_t)))), vk_zero_point);
+      i3 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
+
+      const __m512i vi4x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i4));
+      const __m512i vk4x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(uint8_t)))), vk_zero_point);
+      i4 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
+
+      const __m512i vi5x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i5));
+      const __m512i vk5x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(uint8_t)))), vk_zero_point);
+      i5 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
+
+      const __m512i vi6x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i6));
+      const __m512i vk6x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(uint8_t)))), vk_zero_point);
+      i6 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
+
+      const __m512i vi7x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i7));
+      const __m512i vk7x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(uint8_t)))), vk_zero_point);
+      i7 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
+
+      const __m512i vi8x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i8));
+      const __m512i vk8x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(uint8_t)))), vk_zero_point);
+      i8 += 16;
+
+      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
+
+      // Advance past this group: 16 biases + 9 taps x 16 kernel bytes.
+      w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(uint8_t));
+
+      // FP32 requantization: scale the int32 accumulators in float, then
+      // round back to int32.
+      __m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
+
+      vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale);
+
+      vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
+
+      // Pack int32 -> int16 with saturation and add the output zero point.
+      // The 256-bit pack interleaves the halves, yielding lane order
+      // 0123 89AB 4567 CDEF (as the variable name records).
+      __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), voutput_zero_point);
+
+      // Pack int16 -> uint8, then the dword shuffle (3,1,2,0) restores the
+      // natural 0..F channel order.
+      const __m128i vout012389AB = _mm256_castsi256_si128(vout012389AB4567CDEF);
+      const __m128i vout4567CDEF = _mm256_extracti128_si256(vout012389AB4567CDEF, 1);
+      __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packus_epi16(vout012389AB, vout4567CDEF), _MM_SHUFFLE(3, 1, 2, 0));
+
+      // Clamp to the requested output range and store 16 bytes.
+      vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
+      vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
+
+      _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+      output += 16;
+    }
+    // Remainder: 1-15 channels left; compute a full 16-lane result but store
+    // only the valid low bytes via a mask.
+    if XNN_UNLIKELY(c != 0) {
+      // Prepare mask for valid 8-bit elements (depends on c).
+      const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << (c & 15)) - UINT32_C(1)));
+      {
+        __m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
+
+
+        // Same 9-tap accumulation as the main loop. Kernel loads are unaligned
+        // here, and neither w nor the input pointers advance: this tail runs
+        // at most once.
+        const __m512i vi0x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i0));
+        const __m512i vk0x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
+
+        const __m512i vi1x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i1));
+        const __m512i vk1x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
+
+        const __m512i vi2x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i2));
+        const __m512i vk2x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
+
+        const __m512i vi3x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i3));
+        const __m512i vk3x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
+
+        const __m512i vi4x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i4));
+        const __m512i vk4x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
+
+        const __m512i vi5x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i5));
+        const __m512i vk5x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
+
+        const __m512i vi6x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i6));
+        const __m512i vk6x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
+
+        const __m512i vi7x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i7));
+        const __m512i vk7x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
+
+        const __m512i vi8x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i8));
+        const __m512i vk8x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(uint8_t)))), vk_zero_point);
+
+        vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
+
+
+        // Requantize exactly as in the main loop.
+        __m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
+        vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale);
+        vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
+
+
+        __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), voutput_zero_point);
+
+        const __m128i vout012389AB = _mm256_castsi256_si128(vout012389AB4567CDEF);
+        const __m128i vout4567CDEF = _mm256_extracti128_si256(vout012389AB4567CDEF, 1);
+        __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packus_epi16(vout012389AB, vout4567CDEF), _MM_SHUFFLE(3, 1, 2, 0));
+        vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
+        vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
+
+        // Store only the low c bytes.
+        _mm_mask_storeu_epi8(output, vmask, vout0123456789ABCDEF);
+        output = (uint8_t*) ((uintptr_t) output + c);
+      }
+    }
+
+    output = (uint8_t*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/qu8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c b/src/qu8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c
new file mode 100644
index 0000000..4b6cb98
--- /dev/null
+++ b/src/qu8-dwconv/gen/up32x25-minmax-fp32-avx512skx-mul32.c
@@ -0,0 +1,619 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx512skx-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/intrinsics-polyfill.h>
+
+
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32(
+ size_t channels,
+ size_t output_width,
+ const uint8_t** input,
+ const void* weights,
+ uint8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const uint8_t* zero,
+ const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ const __m512 vscale = _mm512_load_ps(params->fp32_avx512.scale);
+ const __m512i voutput_zero_point = _mm512_load_si512(params->fp32_avx512.output_zero_point);
+ const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->fp32_avx512.output_min);
+ const __m256i voutput_max = _mm256_load_si256((const __m256i*) params->fp32_avx512.output_max);
+ const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 5, 1, 6, 2, 4, 0);
+
+ const __m512i vk_zero_point = _mm512_cvtepu16_epi32(_mm256_load_si256((const __m256i*) params->fp32_avx512.kernel_zero_point));
+ do {
+ const uint8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const uint8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const uint8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const uint8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const uint8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const uint8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const uint8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const uint8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const uint8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ const uint8_t* i9 = input[9];
+ assert(i9 != NULL);
+ if XNN_UNPREDICTABLE(i9 != zero) {
+ i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
+ }
+ const uint8_t* i10 = input[10];
+ assert(i10 != NULL);
+ if XNN_UNPREDICTABLE(i10 != zero) {
+ i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
+ }
+ const uint8_t* i11 = input[11];
+ assert(i11 != NULL);
+ if XNN_UNPREDICTABLE(i11 != zero) {
+ i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
+ }
+ const uint8_t* i12 = input[12];
+ assert(i12 != NULL);
+ if XNN_UNPREDICTABLE(i12 != zero) {
+ i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
+ }
+ const uint8_t* i13 = input[13];
+ assert(i13 != NULL);
+ if XNN_UNPREDICTABLE(i13 != zero) {
+ i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
+ }
+ const uint8_t* i14 = input[14];
+ assert(i14 != NULL);
+ if XNN_UNPREDICTABLE(i14 != zero) {
+ i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
+ }
+ const uint8_t* i15 = input[15];
+ assert(i15 != NULL);
+ if XNN_UNPREDICTABLE(i15 != zero) {
+ i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
+ }
+ const uint8_t* i16 = input[16];
+ assert(i16 != NULL);
+ if XNN_UNPREDICTABLE(i16 != zero) {
+ i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
+ }
+ const uint8_t* i17 = input[17];
+ assert(i17 != NULL);
+ if XNN_UNPREDICTABLE(i17 != zero) {
+ i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
+ }
+ const uint8_t* i18 = input[18];
+ assert(i18 != NULL);
+ if XNN_UNPREDICTABLE(i18 != zero) {
+ i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
+ }
+ const uint8_t* i19 = input[19];
+ assert(i19 != NULL);
+ if XNN_UNPREDICTABLE(i19 != zero) {
+ i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
+ }
+ const uint8_t* i20 = input[20];
+ assert(i20 != NULL);
+ if XNN_UNPREDICTABLE(i20 != zero) {
+ i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
+ }
+ const uint8_t* i21 = input[21];
+ assert(i21 != NULL);
+ if XNN_UNPREDICTABLE(i21 != zero) {
+ i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
+ }
+ const uint8_t* i22 = input[22];
+ assert(i22 != NULL);
+ if XNN_UNPREDICTABLE(i22 != zero) {
+ i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
+ }
+ const uint8_t* i23 = input[23];
+ assert(i23 != NULL);
+ if XNN_UNPREDICTABLE(i23 != zero) {
+ i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
+ }
+ const uint8_t* i24 = input[24];
+ assert(i24 != NULL);
+ if XNN_UNPREDICTABLE(i24 != zero) {
+ i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
+ }
+ input = (const uint8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 32; c -= 32) {
+ __m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
+ __m512i vaccGHIJKLMNOPQRSTUV = _mm512_loadu_si512((const void*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+
+
+ const __m512i vi0x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i0));
+ const __m512i vk0x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 0 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi0xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i0 + 16)));
+ const __m512i vk0xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 16 * sizeof(uint8_t)))), vk_zero_point);
+ i0 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi1x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i1));
+ const __m512i vk1x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 32 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi1xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i1 + 16)));
+ const __m512i vk1xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 48 * sizeof(uint8_t)))), vk_zero_point);
+ i1 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi2x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i2));
+ const __m512i vk2x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 64 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi2xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i2 + 16)));
+ const __m512i vk2xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 80 * sizeof(uint8_t)))), vk_zero_point);
+ i2 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi3x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i3));
+ const __m512i vk3x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 96 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi3xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i3 + 16)));
+ const __m512i vk3xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 112 * sizeof(uint8_t)))), vk_zero_point);
+ i3 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi4x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i4));
+ const __m512i vk4x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 128 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi4xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i4 + 16)));
+ const __m512i vk4xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 144 * sizeof(uint8_t)))), vk_zero_point);
+ i4 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi4xGHIJKLMNOPQRSTUV, vk4xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi5x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i5));
+ const __m512i vk5x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 160 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi5xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i5 + 16)));
+ const __m512i vk5xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 176 * sizeof(uint8_t)))), vk_zero_point);
+ i5 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi5xGHIJKLMNOPQRSTUV, vk5xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi6x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i6));
+ const __m512i vk6x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 192 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi6xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i6 + 16)));
+ const __m512i vk6xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 208 * sizeof(uint8_t)))), vk_zero_point);
+ i6 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi6xGHIJKLMNOPQRSTUV, vk6xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi7x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i7));
+ const __m512i vk7x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 224 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi7xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i7 + 16)));
+ const __m512i vk7xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 240 * sizeof(uint8_t)))), vk_zero_point);
+ i7 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi7xGHIJKLMNOPQRSTUV, vk7xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi8x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i8));
+ const __m512i vk8x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 256 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi8xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i8 + 16)));
+ const __m512i vk8xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 272 * sizeof(uint8_t)))), vk_zero_point);
+ i8 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi8xGHIJKLMNOPQRSTUV, vk8xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi9x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i9));
+ const __m512i vk9x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi9xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i9 + 16)));
+ const __m512i vk9xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 304 * sizeof(uint8_t)))), vk_zero_point);
+ i9 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi9xGHIJKLMNOPQRSTUV, vk9xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi10x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i10));
+ const __m512i vk10x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 320 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi10xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i10 + 16)));
+ const __m512i vk10xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 336 * sizeof(uint8_t)))), vk_zero_point);
+ i10 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi10xGHIJKLMNOPQRSTUV, vk10xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi11x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i11));
+ const __m512i vk11x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 352 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi11xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i11 + 16)));
+ const __m512i vk11xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 368 * sizeof(uint8_t)))), vk_zero_point);
+ i11 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi11xGHIJKLMNOPQRSTUV, vk11xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi12x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i12));
+ const __m512i vk12x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 384 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi12xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i12 + 16)));
+ const __m512i vk12xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 400 * sizeof(uint8_t)))), vk_zero_point);
+ i12 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi12xGHIJKLMNOPQRSTUV, vk12xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi13x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i13));
+ const __m512i vk13x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 416 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi13xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i13 + 16)));
+ const __m512i vk13xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 432 * sizeof(uint8_t)))), vk_zero_point);
+ i13 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi13xGHIJKLMNOPQRSTUV, vk13xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi14x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i14));
+ const __m512i vk14x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 448 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi14xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i14 + 16)));
+ const __m512i vk14xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 464 * sizeof(uint8_t)))), vk_zero_point);
+ i14 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi14xGHIJKLMNOPQRSTUV, vk14xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi15x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i15));
+ const __m512i vk15x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 480 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi15xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i15 + 16)));
+ const __m512i vk15xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 496 * sizeof(uint8_t)))), vk_zero_point);
+ i15 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi15xGHIJKLMNOPQRSTUV, vk15xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi16x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i16));
+ const __m512i vk16x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 512 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi16xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i16 + 16)));
+ const __m512i vk16xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 528 * sizeof(uint8_t)))), vk_zero_point);
+ i16 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi16xGHIJKLMNOPQRSTUV, vk16xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi17x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i17));
+ const __m512i vk17x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 544 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi17xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i17 + 16)));
+ const __m512i vk17xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 560 * sizeof(uint8_t)))), vk_zero_point);
+ i17 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi17xGHIJKLMNOPQRSTUV, vk17xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi18x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i18));
+ const __m512i vk18x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 576 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi18xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i18 + 16)));
+ const __m512i vk18xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 592 * sizeof(uint8_t)))), vk_zero_point);
+ i18 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi18xGHIJKLMNOPQRSTUV, vk18xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi19x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i19));
+ const __m512i vk19x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 608 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi19xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i19 + 16)));
+ const __m512i vk19xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 624 * sizeof(uint8_t)))), vk_zero_point);
+ i19 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi19xGHIJKLMNOPQRSTUV, vk19xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi20x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i20));
+ const __m512i vk20x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 640 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi20xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i20 + 16)));
+ const __m512i vk20xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 656 * sizeof(uint8_t)))), vk_zero_point);
+ i20 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi20xGHIJKLMNOPQRSTUV, vk20xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi21x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i21));
+ const __m512i vk21x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 672 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi21xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i21 + 16)));
+ const __m512i vk21xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 688 * sizeof(uint8_t)))), vk_zero_point);
+ i21 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi21xGHIJKLMNOPQRSTUV, vk21xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi22x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i22));
+ const __m512i vk22x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 704 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi22xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i22 + 16)));
+ const __m512i vk22xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 720 * sizeof(uint8_t)))), vk_zero_point);
+ i22 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi22xGHIJKLMNOPQRSTUV, vk22xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi23x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i23));
+ const __m512i vk23x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 736 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi23xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i23 + 16)));
+ const __m512i vk23xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 752 * sizeof(uint8_t)))), vk_zero_point);
+ i23 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi23xGHIJKLMNOPQRSTUV, vk23xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi24x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i24));
+ const __m512i vk24x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 768 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi24xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i24 + 16)));
+ const __m512i vk24xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 784 * sizeof(uint8_t)))), vk_zero_point);
+ i24 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi24xGHIJKLMNOPQRSTUV, vk24xGHIJKLMNOPQRSTUV));
+
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(int32_t) + 800 * sizeof(uint8_t));
+
+ __m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
+ __m512 vscaledGHIJKLMNOPQRSTUV = _mm512_cvtepi32_ps(vaccGHIJKLMNOPQRSTUV);
+
+ vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale);
+ vscaledGHIJKLMNOPQRSTUV = _mm512_mul_ps(vscaledGHIJKLMNOPQRSTUV, vscale);
+
+ vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
+ vaccGHIJKLMNOPQRSTUV = _mm512_cvtps_epi32(vscaledGHIJKLMNOPQRSTUV);
+
+ __m512i vout0123GHIJ4567KLMN89ABOPQRCDEFSTUV = _mm512_adds_epi16(_mm512_packs_epi32(vacc0123456789ABCDEF, vaccGHIJKLMNOPQRSTUV), voutput_zero_point);
+ __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vaccGHIJKLMNOPQRSTUV), _mm512_extracti32x8_epi32(vaccGHIJKLMNOPQRSTUV, 1)), _mm512_castsi512_si256(voutput_zero_point));
+
+ const __m256i vout0123GHIJ4567KLMN = _mm512_castsi512_si256(vout0123GHIJ4567KLMN89ABOPQRCDEFSTUV);
+ const __m256i vout89ABOPQRCDEFSTUV = _mm512_extracti32x8_epi32(vout0123GHIJ4567KLMN89ABOPQRCDEFSTUV, 1);
+ const __m256i vout0123GHIJ89ABOPQR4567KLMNCDEFSTUV = _mm256_packus_epi16(vout0123GHIJ4567KLMN, vout89ABOPQRCDEFSTUV);
+ __m256i vout0123456789ABCDEFGHIJKLMNOPQRSTUV = _mm256_permutevar8x32_epi32(vout0123GHIJ89ABOPQR4567KLMNCDEFSTUV, vpermute_mask);
+ const __m128i voutGHIJOPQR = _mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV);
+ const __m128i voutKLMNSTUV = _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1);
+ __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packus_epi16(voutGHIJOPQR, voutKLMNSTUV), _MM_SHUFFLE(3, 1, 2, 0));
+
+ vout0123456789ABCDEFGHIJKLMNOPQRSTUV = _mm256_max_epu8(vout0123456789ABCDEFGHIJKLMNOPQRSTUV, voutput_min);
+ vout0123456789ABCDEFGHIJKLMNOPQRSTUV = _mm256_min_epu8(vout0123456789ABCDEFGHIJKLMNOPQRSTUV, voutput_max);
+ voutGHIJKLMNOPQRSTUV = _mm_max_epu8(voutGHIJKLMNOPQRSTUV, _mm256_castsi256_si128(voutput_min));
+ voutGHIJKLMNOPQRSTUV = _mm_min_epu8(voutGHIJKLMNOPQRSTUV, _mm256_castsi256_si128(voutput_max));
+
+ _mm256_storeu_si256((__m256i*) output, vout0123456789ABCDEFGHIJKLMNOPQRSTUV);
+ _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+ output += 32;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ // Prepare mask for valid 8-bit elements (depends on nc).
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << (c & 15)) - UINT32_C(1)));
+ const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 32 * sizeof(int32_t));
+ do {
+ __m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
+
+
+ const __m512i vi0x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i0));
+ const __m512i vk0x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) k)), vk_zero_point);
+ i0 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
+
+ const __m512i vi1x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i1));
+ const __m512i vk1x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 32))), vk_zero_point);
+ i1 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
+
+ const __m512i vi2x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i2));
+ const __m512i vk2x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 64))), vk_zero_point);
+ i2 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
+
+ const __m512i vi3x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i3));
+ const __m512i vk3x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 96))), vk_zero_point);
+ i3 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
+
+ const __m512i vi4x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i4));
+ const __m512i vk4x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 128))), vk_zero_point);
+ i4 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
+
+ const __m512i vi5x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i5));
+ const __m512i vk5x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 160))), vk_zero_point);
+ i5 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
+
+ const __m512i vi6x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i6));
+ const __m512i vk6x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 192))), vk_zero_point);
+ i6 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
+
+ const __m512i vi7x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i7));
+ const __m512i vk7x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 224))), vk_zero_point);
+ i7 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
+
+ const __m512i vi8x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i8));
+ const __m512i vk8x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 256))), vk_zero_point);
+ i8 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
+
+ const __m512i vi9x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i9));
+ const __m512i vk9x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 288))), vk_zero_point);
+ i9 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF));
+
+ const __m512i vi10x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i10));
+ const __m512i vk10x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 320))), vk_zero_point);
+ i10 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF));
+
+ const __m512i vi11x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i11));
+ const __m512i vk11x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 352))), vk_zero_point);
+ i11 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF));
+
+ const __m512i vi12x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i12));
+ const __m512i vk12x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 384))), vk_zero_point);
+ i12 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF));
+
+ const __m512i vi13x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i13));
+ const __m512i vk13x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 416))), vk_zero_point);
+ i13 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF));
+
+ const __m512i vi14x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i14));
+ const __m512i vk14x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 448))), vk_zero_point);
+ i14 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF));
+
+ const __m512i vi15x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i15));
+ const __m512i vk15x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 480))), vk_zero_point);
+ i15 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF));
+
+ const __m512i vi16x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i16));
+ const __m512i vk16x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 512))), vk_zero_point);
+ i16 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF));
+
+ const __m512i vi17x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i17));
+ const __m512i vk17x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 544))), vk_zero_point);
+ i17 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF));
+
+ const __m512i vi18x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i18));
+ const __m512i vk18x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 576))), vk_zero_point);
+ i18 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF));
+
+ const __m512i vi19x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i19));
+ const __m512i vk19x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 608))), vk_zero_point);
+ i19 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF));
+
+ const __m512i vi20x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i20));
+ const __m512i vk20x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 640))), vk_zero_point);
+ i20 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF));
+
+ const __m512i vi21x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i21));
+ const __m512i vk21x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 672))), vk_zero_point);
+ i21 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF));
+
+ const __m512i vi22x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i22));
+ const __m512i vk22x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 704))), vk_zero_point);
+ i22 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF));
+
+ const __m512i vi23x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i23));
+ const __m512i vk23x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 736))), vk_zero_point);
+ i23 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF));
+
+ const __m512i vi24x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i24));
+ const __m512i vk24x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 768))), vk_zero_point);
+ i24 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF));
+
+ k += 16;
+
+ __m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
+ vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale);
+ vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), _mm512_castsi512_si256(voutput_zero_point));
+
+ const __m128i vout012389AB = _mm256_castsi256_si128(vout012389AB4567CDEF);
+ const __m128i vout4567CDEF = _mm256_extracti128_si256(vout012389AB4567CDEF, 1);
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packus_epi16(vout012389AB, vout4567CDEF), _MM_SHUFFLE(3, 1, 2, 0));
+ vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, _mm256_castsi256_si128(voutput_min));
+ vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, _mm256_castsi256_si128(voutput_max));
+
+ if XNN_LIKELY(c >= 16) {
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ c -= 16;
+ } else {
+ _mm_mask_storeu_epi8(output, vmask, vout0123456789ABCDEF);
+ output = (uint8_t*) ((uintptr_t) output + c);
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (uint8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qu8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c b/src/qu8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c
new file mode 100644
index 0000000..6d978e8
--- /dev/null
+++ b/src/qu8-dwconv/gen/up32x9-minmax-fp32-avx512skx-mul32.c
@@ -0,0 +1,299 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx512skx-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+#include <xnnpack/intrinsics-polyfill.h>
+
+
+// Unipass depthwise convolution over quantized uint8 data, 9 kernel taps,
+// channel tile of 32, using AVX512-SKX 32-bit integer multiplies and fp32
+// requantization. Weights are packed per 32-channel group as
+// [32 x int32 bias][9 taps x 32 x uint8 kernel], as shown by the `w`
+// pointer advances below.
+void xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32(
+ size_t channels,
+ size_t output_width,
+ const uint8_t** input,
+ const void* weights,
+ uint8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const uint8_t* zero,
+ const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ // Requantization parameters: fp32 scale, zero point, and uint8 clamping bounds.
+ const __m512 vscale = _mm512_load_ps(params->fp32_avx512.scale);
+ const __m512i voutput_zero_point = _mm512_load_si512(params->fp32_avx512.output_zero_point);
+ const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->fp32_avx512.output_min);
+ const __m256i voutput_max = _mm256_load_si256((const __m256i*) params->fp32_avx512.output_max);
+ // Permutation that undoes the interleaving produced by the pack instructions below.
+ const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 5, 1, 6, 2, 4, 0);
+
+ // Kernel zero point, widened to 32-bit lanes so it can be subtracted from
+ // the zero-extended kernel bytes before multiplication.
+ const __m512i vk_zero_point = _mm512_cvtepu16_epi32(_mm256_load_si256((const __m256i*) params->fp32_avx512.kernel_zero_point));
+ // Outer loop: one iteration per output pixel.
+ do {
+ // Load the 9 input-row pointers for this output pixel. Rows equal to
+ // `zero` are padding and must NOT be offset (they point at a shared
+ // zero buffer).
+ const uint8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const uint8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const uint8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const uint8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const uint8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const uint8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const uint8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const uint8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const uint8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const uint8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ // Main loop: 32 channels per iteration, accumulated in two 16-lane
+ // int32 vectors initialized from the packed biases.
+ for (; c >= 32; c -= 32) {
+ __m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
+ __m512i vaccGHIJKLMNOPQRSTUV = _mm512_loadu_si512((const void*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+
+
+ // For each tap: zero-extend 16 input bytes to int32, subtract the
+ // kernel zero point from the zero-extended kernel bytes, then
+ // multiply-accumulate. Kernel bytes for tap t live at
+ // w + 32*sizeof(int32_t) + t*32 (+16 for the upper half).
+ const __m512i vi0x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i0));
+ const __m512i vk0x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 0 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi0xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i0 + 16)));
+ const __m512i vk0xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 16 * sizeof(uint8_t)))), vk_zero_point);
+ i0 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi1x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i1));
+ const __m512i vk1x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 32 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi1xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i1 + 16)));
+ const __m512i vk1xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 48 * sizeof(uint8_t)))), vk_zero_point);
+ i1 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi2x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i2));
+ const __m512i vk2x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 64 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi2xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i2 + 16)));
+ const __m512i vk2xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 80 * sizeof(uint8_t)))), vk_zero_point);
+ i2 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi3x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i3));
+ const __m512i vk3x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 96 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi3xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i3 + 16)));
+ const __m512i vk3xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 112 * sizeof(uint8_t)))), vk_zero_point);
+ i3 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi4x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i4));
+ const __m512i vk4x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 128 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi4xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i4 + 16)));
+ const __m512i vk4xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 144 * sizeof(uint8_t)))), vk_zero_point);
+ i4 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi4xGHIJKLMNOPQRSTUV, vk4xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi5x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i5));
+ const __m512i vk5x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 160 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi5xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i5 + 16)));
+ const __m512i vk5xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 176 * sizeof(uint8_t)))), vk_zero_point);
+ i5 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi5xGHIJKLMNOPQRSTUV, vk5xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi6x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i6));
+ const __m512i vk6x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 192 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi6xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i6 + 16)));
+ const __m512i vk6xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 208 * sizeof(uint8_t)))), vk_zero_point);
+ i6 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi6xGHIJKLMNOPQRSTUV, vk6xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi7x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i7));
+ const __m512i vk7x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 224 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi7xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i7 + 16)));
+ const __m512i vk7xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 240 * sizeof(uint8_t)))), vk_zero_point);
+ i7 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi7xGHIJKLMNOPQRSTUV, vk7xGHIJKLMNOPQRSTUV));
+
+ const __m512i vi8x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i8));
+ const __m512i vk8x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 256 * sizeof(uint8_t)))), vk_zero_point);
+ const __m512i vi8xGHIJKLMNOPQRSTUV = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (i8 + 16)));
+ const __m512i vk8xGHIJKLMNOPQRSTUV = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_load_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 272 * sizeof(uint8_t)))), vk_zero_point);
+ i8 += 32;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
+ vaccGHIJKLMNOPQRSTUV = _mm512_add_epi32(vaccGHIJKLMNOPQRSTUV, _mm512_mullo_epi32(vi8xGHIJKLMNOPQRSTUV, vk8xGHIJKLMNOPQRSTUV));
+
+ // Advance past this group's 32 biases and 9 taps x 32 kernel bytes.
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(uint8_t));
+
+ // Requantize: int32 accumulator -> fp32, multiply by scale,
+ // round back to int32 (current rounding mode, i.e. round-to-nearest-even).
+ __m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
+ __m512 vscaledGHIJKLMNOPQRSTUV = _mm512_cvtepi32_ps(vaccGHIJKLMNOPQRSTUV);
+
+ vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale);
+ vscaledGHIJKLMNOPQRSTUV = _mm512_mul_ps(vscaledGHIJKLMNOPQRSTUV, vscale);
+
+ vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
+ vaccGHIJKLMNOPQRSTUV = _mm512_cvtps_epi32(vscaledGHIJKLMNOPQRSTUV);
+
+ // Pack int32 -> int16 (saturating), add the output zero point, then
+ // pack int16 -> uint8 (saturating). The 128-bit-lane interleaving from
+ // the pack instructions is undone by vpermute_mask / the shuffle below.
+ __m512i vout0123GHIJ4567KLMN89ABOPQRCDEFSTUV = _mm512_adds_epi16(_mm512_packs_epi32(vacc0123456789ABCDEF, vaccGHIJKLMNOPQRSTUV), voutput_zero_point);
+ __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vaccGHIJKLMNOPQRSTUV), _mm512_extracti32x8_epi32(vaccGHIJKLMNOPQRSTUV, 1)), _mm512_castsi512_si256(voutput_zero_point));
+
+ const __m256i vout0123GHIJ4567KLMN = _mm512_castsi512_si256(vout0123GHIJ4567KLMN89ABOPQRCDEFSTUV);
+ const __m256i vout89ABOPQRCDEFSTUV = _mm512_extracti32x8_epi32(vout0123GHIJ4567KLMN89ABOPQRCDEFSTUV, 1);
+ const __m256i vout0123GHIJ89ABOPQR4567KLMNCDEFSTUV = _mm256_packus_epi16(vout0123GHIJ4567KLMN, vout89ABOPQRCDEFSTUV);
+ __m256i vout0123456789ABCDEFGHIJKLMNOPQRSTUV = _mm256_permutevar8x32_epi32(vout0123GHIJ89ABOPQR4567KLMNCDEFSTUV, vpermute_mask);
+ const __m128i voutGHIJOPQR = _mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV);
+ const __m128i voutKLMNSTUV = _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1);
+ __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packus_epi16(voutGHIJOPQR, voutKLMNSTUV), _MM_SHUFFLE(3, 1, 2, 0));
+
+ // Clamp to [output_min, output_max].
+ vout0123456789ABCDEFGHIJKLMNOPQRSTUV = _mm256_max_epu8(vout0123456789ABCDEFGHIJKLMNOPQRSTUV, voutput_min);
+ vout0123456789ABCDEFGHIJKLMNOPQRSTUV = _mm256_min_epu8(vout0123456789ABCDEFGHIJKLMNOPQRSTUV, voutput_max);
+ voutGHIJKLMNOPQRSTUV = _mm_max_epu8(voutGHIJKLMNOPQRSTUV, _mm256_castsi256_si128(voutput_min));
+ voutGHIJKLMNOPQRSTUV = _mm_min_epu8(voutGHIJKLMNOPQRSTUV, _mm256_castsi256_si128(voutput_max));
+
+ _mm256_storeu_si256((__m256i*) output, vout0123456789ABCDEFGHIJKLMNOPQRSTUV);
+ _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+ output += 32;
+ }
+ // Remainder: 1..31 channels left, processed in chunks of up to 16 with a
+ // masked store for the final partial chunk.
+ if XNN_UNLIKELY(c != 0) {
+ // Prepare mask for valid 8-bit elements (depends on c).
+ const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << (c & 15)) - UINT32_C(1)));
+ // Kernel bytes start after the 32 biases; taps remain 32 bytes apart
+ // because the weights were packed for the 32-channel tile.
+ const uint8_t* k = (const uint8_t*) ((uintptr_t) w + 32 * sizeof(int32_t));
+ do {
+ __m512i vacc0123456789ABCDEF = _mm512_loadu_si512(w);
+
+
+ const __m512i vi0x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i0));
+ const __m512i vk0x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) k)), vk_zero_point);
+ i0 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF));
+
+ const __m512i vi1x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i1));
+ const __m512i vk1x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 32))), vk_zero_point);
+ i1 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF));
+
+ const __m512i vi2x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i2));
+ const __m512i vk2x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 64))), vk_zero_point);
+ i2 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF));
+
+ const __m512i vi3x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i3));
+ const __m512i vk3x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 96))), vk_zero_point);
+ i3 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF));
+
+ const __m512i vi4x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i4));
+ const __m512i vk4x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 128))), vk_zero_point);
+ i4 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF));
+
+ const __m512i vi5x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i5));
+ const __m512i vk5x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 160))), vk_zero_point);
+ i5 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF));
+
+ const __m512i vi6x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i6));
+ const __m512i vk6x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 192))), vk_zero_point);
+ i6 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF));
+
+ const __m512i vi7x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i7));
+ const __m512i vk7x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 224))), vk_zero_point);
+ i7 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF));
+
+ const __m512i vi8x0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) i8));
+ const __m512i vk8x0123456789ABCDEF = _mm512_sub_epi32(_mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) (k + 256))), vk_zero_point);
+ i8 += 16;
+
+ vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF));
+
+ k += 16;
+
+ // Requantize this 16-channel chunk (same scheme as the main loop).
+ __m512 vscaled0123456789ABCDEF = _mm512_cvtepi32_ps(vacc0123456789ABCDEF);
+ vscaled0123456789ABCDEF = _mm512_mul_ps(vscaled0123456789ABCDEF, vscale);
+ vacc0123456789ABCDEF = _mm512_cvtps_epi32(vscaled0123456789ABCDEF);
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), _mm512_castsi512_si256(voutput_zero_point));
+
+ const __m128i vout012389AB = _mm256_castsi256_si128(vout012389AB4567CDEF);
+ const __m128i vout4567CDEF = _mm256_extracti128_si256(vout012389AB4567CDEF, 1);
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packus_epi16(vout012389AB, vout4567CDEF), _MM_SHUFFLE(3, 1, 2, 0));
+ vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, _mm256_castsi256_si128(voutput_min));
+ vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, _mm256_castsi256_si128(voutput_max));
+
+ // Full 16-byte store while >=16 channels remain; masked store for the
+ // final partial chunk (vmask keeps only the low c&15 bytes).
+ if XNN_LIKELY(c >= 16) {
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ c -= 16;
+ } else {
+ _mm_mask_storeu_epi8(output, vmask, vout0123456789ABCDEF);
+ output = (uint8_t*) ((uintptr_t) output + c);
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (uint8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/xnnpack/dwconv.h b/src/xnnpack/dwconv.h
index 5fed8a2..4a98d55 100644
--- a/src/xnnpack/dwconv.h
+++ b/src/xnnpack/dwconv.h
@@ -307,6 +307,9 @@
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx2_mul32)
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx2_mul32)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32)
+
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__sse41_mul32)
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__sse41_mul32)
@@ -320,6 +323,9 @@
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32)
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32)
+DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32)
+
#define DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
diff --git a/test/qu8-dwconv-minmax-fp32.cc b/test/qu8-dwconv-minmax-fp32.cc
index f46adcf..a26da55 100644
--- a/test/qu8-dwconv-minmax-fp32.cc
+++ b/test/qu8-dwconv-minmax-fp32.cc
@@ -3231,6 +3231,434 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, c_eq_16) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(16)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, c_div_16) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, c_div_16_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, c_div_16_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, c_lt_16) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 1; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, c_gt_16) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, c_gt_16_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, c_gt_16_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, multipixel) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+      .channels(channels)
+ .width(5)
+ .output_stride(83)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, input_zero_point_only) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .input_zero_point(255)
+ .kernel_zero_point(0)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, kernel_zero_point_only) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .input_zero_point(0)
+ .kernel_zero_point(255)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, input_offset) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X9__AVX512SKX_MUL32, zero) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .zero_index(mz)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, c_eq_32) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(32)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, c_div_32) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, c_div_32_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, c_div_32_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, c_lt_32) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 1; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, c_gt_32) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, c_gt_32_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, c_gt_32_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, multipixel) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+      .channels(channels)
+ .width(5)
+ .output_stride(163)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, input_zero_point_only) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .input_zero_point(255)
+ .kernel_zero_point(0)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, kernel_zero_point_only) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .input_zero_point(0)
+ .kernel_zero_point(255)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, input_offset) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .input_offset(592)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X9__AVX512SKX_MUL32, zero) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(9)
+ .channels(channels)
+ .input_offset(592)
+ .zero_index(mz)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(QU8_DWCONV_MINMAX_FP32_UP8X25__SSE2_MUL16, c_eq_8) {
TEST_REQUIRES_X86_SSE2;
DWConvMicrokernelTester()
@@ -6438,3 +6866,431 @@
}
}
#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, c_eq_16) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(16)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, c_div_16) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, c_div_16_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, c_div_16_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, c_lt_16) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 1; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, c_gt_16) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, c_gt_16_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, c_gt_16_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, multipixel) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ for (size_t step = 2; step <= 25; step++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+      .channels(channels)
+ .width(5)
+ .output_stride(83)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, input_zero_point_only) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .input_zero_point(255)
+ .kernel_zero_point(0)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, kernel_zero_point_only) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .input_zero_point(0)
+ .kernel_zero_point(255)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, input_offset) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .input_offset(304)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP16X25__AVX512SKX_MUL32, zero) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t mz = 0; mz < 25; mz++) {
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .input_offset(304)
+ .zero_index(mz)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, c_eq_32) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(32)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, c_div_32) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, c_div_32_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, c_div_32_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, c_lt_32) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 1; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, c_gt_32) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, c_gt_32_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, c_gt_32_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, multipixel) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ for (size_t step = 2; step <= 25; step++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+      .channels(channels)
+ .width(5)
+ .output_stride(163)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, input_zero_point_only) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .input_zero_point(255)
+ .kernel_zero_point(0)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, kernel_zero_point_only) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .input_zero_point(0)
+ .kernel_zero_point(255)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, input_offset) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .input_offset(592)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+
+ TEST(QU8_DWCONV_MINMAX_FP32_UP32X25__AVX512SKX_MUL32, zero) {
+ TEST_REQUIRES_X86_AVX512SKX;
+ for (uint32_t mz = 0; mz < 25; mz++) {
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .input_offset(592)
+ .zero_index(mz)
+ .Test(xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32, xnn_init_qu8_conv_minmax_fp32_avx512_params, xnn_init_qu8_requantization_fp32_params, xnn_qu8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
diff --git a/test/qu8-dwconv-minmax-fp32.yaml b/test/qu8-dwconv-minmax-fp32.yaml
index 3be8a07..e9aa0be 100644
--- a/test/qu8-dwconv-minmax-fp32.yaml
+++ b/test/qu8-dwconv-minmax-fp32.yaml
@@ -32,6 +32,10 @@
init: xnn_init_qu8_conv_minmax_fp32_avx2_params
- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx2_mul32
init: xnn_init_qu8_conv_minmax_fp32_avx2_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up16x9__avx512skx_mul32
+ init: xnn_init_qu8_conv_minmax_fp32_avx512_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up32x9__avx512skx_mul32
+ init: xnn_init_qu8_conv_minmax_fp32_avx512_params
- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__sse2_mul16
init: xnn_init_qu8_conv_minmax_fp32_sse2_params
- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__sse2_mul16
@@ -62,3 +66,7 @@
init: xnn_init_qu8_conv_minmax_fp32_avx2_params
- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32
init: xnn_init_qu8_conv_minmax_fp32_avx2_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__avx512skx_mul32
+ init: xnn_init_qu8_conv_minmax_fp32_avx512_params
+- name: xnn_qu8_dwconv_minmax_fp32_ukernel_up32x25__avx512skx_mul32
+ init: xnn_init_qu8_conv_minmax_fp32_avx512_params