Support QC8 DWCONV microkernels
- Add a minimal set of 8-bit fixed-point DWCONV microkernels with per-channel
  quantization (QC8), optimized for AVX2.
- Extend the packing functions to allow extra space after the kernel data.
  Per-channel quantization parameters are later packed into that space (see
  the layout sketch below).
- Extend DWConvMicrokernelTester to support unit testing of QC8 DWCONV
  microkernels.
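
For reference, a minimal sketch of the packed-weights layout this enables.
For each channel-tile group of cr channels, the pack functions emit cr int32
biases followed by the int8 kernel taps, and now skip extra_bytes at the end
of each group; for QC8 the caller reserves cr * sizeof(float) there and later
writes the per-channel scales into it. The helper below is hypothetical (not
part of this change) and only illustrates the offset arithmetic:

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  /* Hypothetical illustration only: after calling a pack function with
   * extra_bytes == cr * sizeof(float), copy the per-channel scales into
   * the reserved space of every channel-tile group. The generated QC8
   * kernels read these floats immediately after the kernel taps. */
  static void fill_qc8_dwconv_scales(
    size_t channels, size_t cr, size_t kernel_size /* kernel h * w */,
    const float* scale, void* packed_w)
  {
    const size_t group_stride =
      cr * sizeof(int32_t) +               /* biases */
      cr * kernel_size * sizeof(int8_t) +  /* kernel taps */
      cr * sizeof(float);                  /* reserved for scales */
    for (size_t c = 0; c < channels; c += cr) {
      const size_t block = channels - c < cr ? channels - c : cr;
      float* packed_scale = (float*) ((uintptr_t) packed_w +
        cr * sizeof(int32_t) + cr * kernel_size * sizeof(int8_t));
      memcpy(packed_scale, scale + c, block * sizeof(float));
      packed_w = (void*) ((uintptr_t) packed_w + group_stride);
    }
  }

In the fp32 variants the microkernel then converts the int32 accumulators to
float, multiplies by these per-channel scales, and converts back before
adding the output zero point and clamping (see the vscale loads after the
kernel-tap loop in the generated kernels below).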
PiperOrigin-RevId: 377620717
diff --git a/BUILD.bazel b/BUILD.bazel
index 54fd96e..209c8c2 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -3649,6 +3649,18 @@
"src/math/sigmoid-avx2-rr2-p5-div.c",
"src/math/sigmoid-avx2-rr2-p5-nr1fma.c",
"src/math/sigmoid-avx2-rr2-p5-nr2fma.c",
+ "src/qc8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c",
+ "src/qc8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c",
+ "src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c",
+ "src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c",
+ "src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c",
+ "src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c",
+ "src/qc8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c",
+ "src/qc8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c",
+ "src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c",
+ "src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c",
+ "src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c",
+ "src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c",
"src/qc8-gemm/gen/1x8c8-minmax-fp32-avx2.c",
"src/qc8-gemm/gen/1x8c8-xw-minmax-fp32-avx2.c",
"src/qc8-gemm/gen/2x8c8-minmax-fp32-avx2.c",
@@ -7235,6 +7247,17 @@
)
xnnpack_unit_test(
+ name = "qc8_dwconv_minmax_fp32_test",
+ timeout = "moderate",
+ srcs = [
+ "test/qc8-dwconv-minmax-fp32.cc",
+ "test/dwconv-microkernel-tester.h",
+ "src/xnnpack/AlignedAllocator.h",
+ ] + WEIGHTS_PACK_HDRS + MICROKERNEL_TEST_HDRS,
+ deps = MICROKERNEL_TEST_DEPS + [":packing"],
+)
+
+xnnpack_unit_test(
name = "qc8_gemm_minmax_fp32_test",
timeout = "moderate",
srcs = [
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9ce36a3..1c71c90 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2883,15 +2883,27 @@
src/math/sigmoid-avx2-rr2-p5-div.c
src/math/sigmoid-avx2-rr2-p5-nr1fma.c
src/math/sigmoid-avx2-rr2-p5-nr2fma.c
+ src/qc8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
+ src/qc8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
+ src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
+ src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
+ src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
+ src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
+ src/qc8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
+ src/qc8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
+ src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
+ src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
+ src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
+ src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
src/qc8-gemm/gen/1x8c8-minmax-fp32-avx2.c
src/qc8-gemm/gen/1x8c8-xw-minmax-fp32-avx2.c
src/qc8-gemm/gen/2x8c8-minmax-fp32-avx2.c
src/qc8-gemm/gen/2x8c8-xw-minmax-fp32-avx2.c
src/qc8-gemm/gen/3x8c8-minmax-fp32-avx2.c
src/qc8-gemm/gen/3x8c8-xw-minmax-fp32-avx2.c
- src/qs8-igemm/gen/1x8c8-minmax-fp32-avx2.c
- src/qs8-igemm/gen/2x8c8-minmax-fp32-avx2.c
- src/qs8-igemm/gen/3x8c8-minmax-fp32-avx2.c
+ src/qc8-igemm/gen/1x8c8-minmax-fp32-avx2.c
+ src/qc8-igemm/gen/2x8c8-minmax-fp32-avx2.c
+ src/qc8-igemm/gen/3x8c8-minmax-fp32-avx2.c
src/qs8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
src/qs8-dwconv/gen/up8x9-minmax-gemmlowp-avx2-mul32.c
src/qs8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
@@ -4942,6 +4954,15 @@
TARGET_LINK_LIBRARIES(f32-vrsubc-relu-test PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
ADD_TEST(f32-vrsubc-relu-test f32-vrsubc-relu-test)
+ ADD_EXECUTABLE(qc8-dwconv-minmax-fp32-test test/qc8-dwconv-minmax-fp32.cc)
+ SET_TARGET_PROPERTIES(qc8-dwconv-minmax-fp32-test PROPERTIES
+ CXX_STANDARD 11
+ CXX_STANDARD_REQUIRED YES
+ CXX_EXTENSIONS YES)
+ TARGET_INCLUDE_DIRECTORIES(qc8-dwconv-minmax-fp32-test PRIVATE src test)
+ TARGET_LINK_LIBRARIES(qc8-dwconv-minmax-fp32-test PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
+ ADD_TEST(qc8-dwconv-minmax-fp32-test qc8-dwconv-minmax-fp32-test)
+
ADD_EXECUTABLE(qc8-gemm-minmax-fp32-test test/qc8-gemm-minmax-fp32.cc)
SET_TARGET_PROPERTIES(qc8-gemm-minmax-fp32-test PROPERTIES
CXX_STANDARD 11
diff --git a/bench/f16-dwconv.cc b/bench/f16-dwconv.cc
index 8c27d83..8476ed7 100644
--- a/bench/f16-dwconv.cc
+++ b/bench/f16-dwconv.cc
@@ -91,7 +91,7 @@
std::vector<uint16_t, AlignedAllocator<uint16_t, 32>> w(w_elements * num_buffers);
std::fill(w.begin(), w.end(), 0.0f);
xnn_pack_f16_dwconv_ghw_w(kernel_height, kernel_width, channels, cr,
- k.data(), b.data(), w.data(), nullptr);
+ k.data(), b.data(), w.data(), 0 /* extra bytes */, nullptr);
for (size_t n = 1; n < num_buffers; n++) {
std::copy(w.cbegin(), w.cbegin() + w_elements, w.begin() + n * w_elements);
}
diff --git a/bench/f32-dwconv.cc b/bench/f32-dwconv.cc
index d4766c3..9dbcad7 100644
--- a/bench/f32-dwconv.cc
+++ b/bench/f32-dwconv.cc
@@ -83,7 +83,7 @@
std::vector<float, AlignedAllocator<float, 32>> w(w_elements * num_buffers);
std::fill(w.begin(), w.end(), 0.0f);
xnn_pack_f32_dwconv_ghw_w(kernel_height, kernel_width, channels, cr,
- k.data(), b.data(), w.data(), nullptr);
+ k.data(), b.data(), w.data(), 0 /* extra bytes */, nullptr);
for (size_t n = 1; n < num_buffers; n++) {
std::copy(w.cbegin(), w.cbegin() + w_elements, w.begin() + n * w_elements);
}
diff --git a/scripts/generate-qs8-dwconv.sh b/scripts/generate-qs8-dwconv.sh
index 997fd8f..9c7cd9d 100755
--- a/scripts/generate-qs8-dwconv.sh
+++ b/scripts/generate-qs8-dwconv.sh
@@ -139,37 +139,53 @@
tools/xngen src/qs8-dwconv/unipass-sse-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=25 -D SSE=4 -D AVX=1 -D REQUANTIZATION=FP32 -D XOP=1 -o src/qs8-dwconv/gen/up24x25-minmax-fp32-xop-mul32.c
################################### x86 AVX2 ##################################
-tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up8x9-minmax-gemmlowp-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up24x9-minmax-gemmlowp-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul16.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up8x25-minmax-gemmlowp-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=25 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up24x25-minmax-gemmlowp-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up8x9-minmax-gemmlowp-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up24x9-minmax-gemmlowp-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
-tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
+
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=9 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
+
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up8x25-minmax-gemmlowp-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up24x25-minmax-gemmlowp-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul32.c
+
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D CHANNELWISE=0 -D REQUANTIZATION=FP32 -o src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
+
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=25 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=25 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=25 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
+tools/xngen src/qs8-dwconv/unipass-avx2-mul32.c.in -D CHANNEL_TILE=32 -D KERNEL_TILE=25 -D CHANNELWISE=1 -D REQUANTIZATION=FP32 -o src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
################################## x86 AVX512 #################################
tools/xngen src/qs8-dwconv/unipass-avx512skx-mul32.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D REQUANTIZATION=GEMMLOWP -o src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx512skx-mul32.c
@@ -187,3 +203,4 @@
################################## Unit tests #################################
tools/generate-dwconv-test.py --spec test/qs8-dwconv-minmax-gemmlowp.yaml --output test/qs8-dwconv-minmax-gemmlowp.cc
tools/generate-dwconv-test.py --spec test/qs8-dwconv-minmax-fp32.yaml --output test/qs8-dwconv-minmax-fp32.cc
+tools/generate-dwconv-test.py --spec test/qc8-dwconv-minmax-fp32.yaml --output test/qc8-dwconv-minmax-fp32.cc
diff --git a/src/operators/convolution-nhwc.c b/src/operators/convolution-nhwc.c
index baea5cd..2d5f051 100644
--- a/src/operators/convolution-nhwc.c
+++ b/src/operators/convolution-nhwc.c
@@ -289,7 +289,7 @@
pack_dwconv_ghw_w(
kernel_height, kernel_width,
groups, dwconv_ukernel->channel_tile,
- kernel, bias, convolution_op->packed_weights, packing_params);
+ kernel, bias, convolution_op->packed_weights, 0 /* extra bytes */, packing_params);
}
const union dwconv_fused_ukernels* ukernels = &dwconv_ukernel->minmax;
diff --git a/src/packing.c b/src/packing.c
index 1f4a6b4..c5eb513 100644
--- a/src/packing.c
+++ b/src/packing.c
@@ -1108,6 +1108,7 @@
const float* k,
const float* b,
float* packed_w,
+ size_t extra_bytes,
const void* params)
{
for (size_t cr_block_start = 0; cr_block_start < c; cr_block_start += cr) {
@@ -1132,6 +1133,7 @@
packed_w += cr - cr_block_size;
}
}
+ packed_w = (float*) ((uintptr_t) packed_w + extra_bytes);
}
}
@@ -1143,6 +1145,7 @@
const uint16_t* k,
const uint16_t* b,
uint16_t* packed_w,
+ size_t extra_bytes,
const void* params)
{
for (size_t cr_block_start = 0; cr_block_start < c; cr_block_start += cr) {
@@ -1167,6 +1170,7 @@
packed_w += cr - cr_block_size;
}
}
+ packed_w = (uint16_t*) ((uintptr_t) packed_w + extra_bytes);
}
}
@@ -1178,6 +1182,7 @@
const uint8_t* k,
const int32_t* b,
void* packed_w,
+ size_t extra_bytes,
const struct xnn_qu8_packing_params* params)
{
const int32_t izp = (int32_t) params->input_zero_point;
@@ -1209,6 +1214,7 @@
packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(uint8_t));
}
}
+ packed_w = (void*) ((uintptr_t) packed_w + extra_bytes);
}
}
@@ -1220,6 +1226,7 @@
const int8_t* k,
const int32_t* b,
void* packed_w,
+ size_t extra_bytes,
const struct xnn_qs8_packing_params* params)
{
const int32_t izp = (int32_t) params->input_zero_point;
@@ -1250,6 +1257,7 @@
packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int8_t));
}
}
+ packed_w = (void*) ((uintptr_t) packed_w + extra_bytes);
}
}
diff --git a/src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c b/src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
new file mode 100644
index 0000000..71bafc8
--- /dev/null
+++ b/src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
@@ -0,0 +1,671 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ const int8_t* i9 = input[9];
+ assert(i9 != NULL);
+ if XNN_UNPREDICTABLE(i9 != zero) {
+ i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
+ }
+ const int8_t* i10 = input[10];
+ assert(i10 != NULL);
+ if XNN_UNPREDICTABLE(i10 != zero) {
+ i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
+ }
+ const int8_t* i11 = input[11];
+ assert(i11 != NULL);
+ if XNN_UNPREDICTABLE(i11 != zero) {
+ i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
+ }
+ const int8_t* i12 = input[12];
+ assert(i12 != NULL);
+ if XNN_UNPREDICTABLE(i12 != zero) {
+ i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
+ }
+ const int8_t* i13 = input[13];
+ assert(i13 != NULL);
+ if XNN_UNPREDICTABLE(i13 != zero) {
+ i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
+ }
+ const int8_t* i14 = input[14];
+ assert(i14 != NULL);
+ if XNN_UNPREDICTABLE(i14 != zero) {
+ i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
+ }
+ const int8_t* i15 = input[15];
+ assert(i15 != NULL);
+ if XNN_UNPREDICTABLE(i15 != zero) {
+ i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
+ }
+ const int8_t* i16 = input[16];
+ assert(i16 != NULL);
+ if XNN_UNPREDICTABLE(i16 != zero) {
+ i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
+ }
+ const int8_t* i17 = input[17];
+ assert(i17 != NULL);
+ if XNN_UNPREDICTABLE(i17 != zero) {
+ i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
+ }
+ const int8_t* i18 = input[18];
+ assert(i18 != NULL);
+ if XNN_UNPREDICTABLE(i18 != zero) {
+ i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
+ }
+ const int8_t* i19 = input[19];
+ assert(i19 != NULL);
+ if XNN_UNPREDICTABLE(i19 != zero) {
+ i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
+ }
+ const int8_t* i20 = input[20];
+ assert(i20 != NULL);
+ if XNN_UNPREDICTABLE(i20 != zero) {
+ i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
+ }
+ const int8_t* i21 = input[21];
+ assert(i21 != NULL);
+ if XNN_UNPREDICTABLE(i21 != zero) {
+ i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
+ }
+ const int8_t* i22 = input[22];
+ assert(i22 != NULL);
+ if XNN_UNPREDICTABLE(i22 != zero) {
+ i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
+ }
+ const int8_t* i23 = input[23];
+ assert(i23 != NULL);
+ if XNN_UNPREDICTABLE(i23 != zero) {
+ i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
+ }
+ const int8_t* i24 = input[24];
+ assert(i24 != NULL);
+ if XNN_UNPREDICTABLE(i24 != zero) {
+ i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+
+
+ const __m256i vi0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i0));
+ const __m256i vk0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ i0 += 16;
+
+ const __m256i vprod0x0123456789ABCDEF = _mm256_mullo_epi16(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF);
+ const __m128i vprod0x89ABCDEF = _mm256_extracti128_si256(vprod0x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod0x89ABCDEF));
+
+ const __m256i vi1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i1));
+ const __m256i vk1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ i1 += 16;
+
+ const __m256i vprod1x0123456789ABCDEF = _mm256_mullo_epi16(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
+ const __m128i vprod1x89ABCDEF = _mm256_extracti128_si256(vprod1x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod1x89ABCDEF));
+
+ const __m256i vi2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i2));
+ const __m256i vk2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ i2 += 16;
+
+ const __m256i vprod2x0123456789ABCDEF = _mm256_mullo_epi16(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF);
+ const __m128i vprod2x89ABCDEF = _mm256_extracti128_si256(vprod2x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod2x89ABCDEF));
+
+ const __m256i vi3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i3));
+ const __m256i vk3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ i3 += 16;
+
+ const __m256i vprod3x0123456789ABCDEF = _mm256_mullo_epi16(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF);
+ const __m128i vprod3x89ABCDEF = _mm256_extracti128_si256(vprod3x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod3x89ABCDEF));
+
+ const __m256i vi4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i4));
+ const __m256i vk4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ i4 += 16;
+
+ const __m256i vprod4x0123456789ABCDEF = _mm256_mullo_epi16(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF);
+ const __m128i vprod4x89ABCDEF = _mm256_extracti128_si256(vprod4x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod4x89ABCDEF));
+
+ const __m256i vi5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i5));
+ const __m256i vk5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ i5 += 16;
+
+ const __m256i vprod5x0123456789ABCDEF = _mm256_mullo_epi16(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF);
+ const __m128i vprod5x89ABCDEF = _mm256_extracti128_si256(vprod5x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod5x89ABCDEF));
+
+ const __m256i vi6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i6));
+ const __m256i vk6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ i6 += 16;
+
+ const __m256i vprod6x0123456789ABCDEF = _mm256_mullo_epi16(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF);
+ const __m128i vprod6x89ABCDEF = _mm256_extracti128_si256(vprod6x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod6x89ABCDEF));
+
+ const __m256i vi7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i7));
+ const __m256i vk7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ i7 += 16;
+
+ const __m256i vprod7x0123456789ABCDEF = _mm256_mullo_epi16(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF);
+ const __m128i vprod7x89ABCDEF = _mm256_extracti128_si256(vprod7x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod7x89ABCDEF));
+
+ const __m256i vi8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i8));
+ const __m256i vk8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ i8 += 16;
+
+ const __m256i vprod8x0123456789ABCDEF = _mm256_mullo_epi16(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF);
+ const __m128i vprod8x89ABCDEF = _mm256_extracti128_si256(vprod8x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
+
+ const __m256i vi9x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i9));
+ const __m256i vk9x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ i9 += 16;
+
+ const __m256i vprod9x0123456789ABCDEF = _mm256_mullo_epi16(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF);
+ const __m128i vprod9x89ABCDEF = _mm256_extracti128_si256(vprod9x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod9x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod9x89ABCDEF));
+
+ const __m256i vi10x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i10));
+ const __m256i vk10x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ i10 += 16;
+
+ const __m256i vprod10x0123456789ABCDEF = _mm256_mullo_epi16(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF);
+ const __m128i vprod10x89ABCDEF = _mm256_extracti128_si256(vprod10x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod10x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod10x89ABCDEF));
+
+ const __m256i vi11x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i11));
+ const __m256i vk11x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ i11 += 16;
+
+ const __m256i vprod11x0123456789ABCDEF = _mm256_mullo_epi16(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF);
+ const __m128i vprod11x89ABCDEF = _mm256_extracti128_si256(vprod11x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod11x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod11x89ABCDEF));
+
+ const __m256i vi12x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i12));
+ const __m256i vk12x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ i12 += 16;
+
+ const __m256i vprod12x0123456789ABCDEF = _mm256_mullo_epi16(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF);
+ const __m128i vprod12x89ABCDEF = _mm256_extracti128_si256(vprod12x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod12x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod12x89ABCDEF));
+
+ const __m256i vi13x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i13));
+ const __m256i vk13x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+ i13 += 16;
+
+ const __m256i vprod13x0123456789ABCDEF = _mm256_mullo_epi16(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF);
+ const __m128i vprod13x89ABCDEF = _mm256_extracti128_si256(vprod13x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod13x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod13x89ABCDEF));
+
+ const __m256i vi14x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i14));
+ const __m256i vk14x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(int8_t))));
+ i14 += 16;
+
+ const __m256i vprod14x0123456789ABCDEF = _mm256_mullo_epi16(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF);
+ const __m128i vprod14x89ABCDEF = _mm256_extracti128_si256(vprod14x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod14x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod14x89ABCDEF));
+
+ const __m256i vi15x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i15));
+ const __m256i vk15x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(int8_t))));
+ i15 += 16;
+
+ const __m256i vprod15x0123456789ABCDEF = _mm256_mullo_epi16(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF);
+ const __m128i vprod15x89ABCDEF = _mm256_extracti128_si256(vprod15x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod15x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod15x89ABCDEF));
+
+ const __m256i vi16x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i16));
+ const __m256i vk16x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(int8_t))));
+ i16 += 16;
+
+ const __m256i vprod16x0123456789ABCDEF = _mm256_mullo_epi16(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF);
+ const __m128i vprod16x89ABCDEF = _mm256_extracti128_si256(vprod16x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod16x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod16x89ABCDEF));
+
+ const __m256i vi17x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i17));
+ const __m256i vk17x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(int8_t))));
+ i17 += 16;
+
+ const __m256i vprod17x0123456789ABCDEF = _mm256_mullo_epi16(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF);
+ const __m128i vprod17x89ABCDEF = _mm256_extracti128_si256(vprod17x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod17x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod17x89ABCDEF));
+
+ const __m256i vi18x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i18));
+ const __m256i vk18x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(int8_t))));
+ i18 += 16;
+
+ const __m256i vprod18x0123456789ABCDEF = _mm256_mullo_epi16(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF);
+ const __m128i vprod18x89ABCDEF = _mm256_extracti128_si256(vprod18x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod18x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod18x89ABCDEF));
+
+ const __m256i vi19x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i19));
+ const __m256i vk19x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(int8_t))));
+ i19 += 16;
+
+ const __m256i vprod19x0123456789ABCDEF = _mm256_mullo_epi16(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF);
+ const __m128i vprod19x89ABCDEF = _mm256_extracti128_si256(vprod19x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod19x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod19x89ABCDEF));
+
+ const __m256i vi20x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i20));
+ const __m256i vk20x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(int8_t))));
+ i20 += 16;
+
+ const __m256i vprod20x0123456789ABCDEF = _mm256_mullo_epi16(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF);
+ const __m128i vprod20x89ABCDEF = _mm256_extracti128_si256(vprod20x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod20x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod20x89ABCDEF));
+
+ const __m256i vi21x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i21));
+ const __m256i vk21x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(int8_t))));
+ i21 += 16;
+
+ const __m256i vprod21x0123456789ABCDEF = _mm256_mullo_epi16(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF);
+ const __m128i vprod21x89ABCDEF = _mm256_extracti128_si256(vprod21x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod21x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod21x89ABCDEF));
+
+ const __m256i vi22x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i22));
+ const __m256i vk22x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(int8_t))));
+ i22 += 16;
+
+ const __m256i vprod22x0123456789ABCDEF = _mm256_mullo_epi16(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF);
+ const __m128i vprod22x89ABCDEF = _mm256_extracti128_si256(vprod22x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod22x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod22x89ABCDEF));
+
+ const __m256i vi23x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i23));
+ const __m256i vk23x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(int8_t))));
+ i23 += 16;
+
+ const __m256i vprod23x0123456789ABCDEF = _mm256_mullo_epi16(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF);
+ const __m128i vprod23x89ABCDEF = _mm256_extracti128_si256(vprod23x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod23x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod23x89ABCDEF));
+
+ const __m256i vi24x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i24));
+ const __m256i vk24x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(int8_t))));
+ i24 += 16;
+
+ const __m256i vprod24x0123456789ABCDEF = _mm256_mullo_epi16(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF);
+ const __m128i vprod24x89ABCDEF = _mm256_extracti128_si256(vprod24x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod24x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod24x89ABCDEF));
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t));
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ const __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+
+
+ const __m256i vi0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i0));
+ const __m256i vk0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+
+ const __m256i vprod0x0123456789ABCDEF = _mm256_mullo_epi16(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF);
+ const __m128i vprod0x89ABCDEF = _mm256_extracti128_si256(vprod0x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod0x89ABCDEF));
+
+ const __m256i vi1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i1));
+ const __m256i vk1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+
+ const __m256i vprod1x0123456789ABCDEF = _mm256_mullo_epi16(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
+ const __m128i vprod1x89ABCDEF = _mm256_extracti128_si256(vprod1x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod1x89ABCDEF));
+
+ const __m256i vi2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i2));
+ const __m256i vk2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+
+ const __m256i vprod2x0123456789ABCDEF = _mm256_mullo_epi16(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF);
+ const __m128i vprod2x89ABCDEF = _mm256_extracti128_si256(vprod2x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod2x89ABCDEF));
+
+ const __m256i vi3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i3));
+ const __m256i vk3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+
+ const __m256i vprod3x0123456789ABCDEF = _mm256_mullo_epi16(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF);
+ const __m128i vprod3x89ABCDEF = _mm256_extracti128_si256(vprod3x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod3x89ABCDEF));
+
+ const __m256i vi4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i4));
+ const __m256i vk4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+
+ const __m256i vprod4x0123456789ABCDEF = _mm256_mullo_epi16(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF);
+ const __m128i vprod4x89ABCDEF = _mm256_extracti128_si256(vprod4x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod4x89ABCDEF));
+
+ const __m256i vi5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i5));
+ const __m256i vk5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+
+ const __m256i vprod5x0123456789ABCDEF = _mm256_mullo_epi16(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF);
+ const __m128i vprod5x89ABCDEF = _mm256_extracti128_si256(vprod5x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod5x89ABCDEF));
+
+ const __m256i vi6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i6));
+ const __m256i vk6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+
+ const __m256i vprod6x0123456789ABCDEF = _mm256_mullo_epi16(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF);
+ const __m128i vprod6x89ABCDEF = _mm256_extracti128_si256(vprod6x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod6x89ABCDEF));
+
+ const __m256i vi7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i7));
+ const __m256i vk7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+
+ const __m256i vprod7x0123456789ABCDEF = _mm256_mullo_epi16(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF);
+ const __m128i vprod7x89ABCDEF = _mm256_extracti128_si256(vprod7x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod7x89ABCDEF));
+
+ const __m256i vi8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i8));
+ const __m256i vk8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+
+ const __m256i vprod8x0123456789ABCDEF = _mm256_mullo_epi16(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF);
+ const __m128i vprod8x89ABCDEF = _mm256_extracti128_si256(vprod8x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
+
+ const __m256i vi9x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i9));
+ const __m256i vk9x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+
+ const __m256i vprod9x0123456789ABCDEF = _mm256_mullo_epi16(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF);
+ const __m128i vprod9x89ABCDEF = _mm256_extracti128_si256(vprod9x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod9x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod9x89ABCDEF));
+
+ const __m256i vi10x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i10));
+ const __m256i vk10x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+
+ const __m256i vprod10x0123456789ABCDEF = _mm256_mullo_epi16(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF);
+ const __m128i vprod10x89ABCDEF = _mm256_extracti128_si256(vprod10x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod10x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod10x89ABCDEF));
+
+ const __m256i vi11x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i11));
+ const __m256i vk11x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+
+ const __m256i vprod11x0123456789ABCDEF = _mm256_mullo_epi16(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF);
+ const __m128i vprod11x89ABCDEF = _mm256_extracti128_si256(vprod11x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod11x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod11x89ABCDEF));
+
+ const __m256i vi12x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i12));
+ const __m256i vk12x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+
+ const __m256i vprod12x0123456789ABCDEF = _mm256_mullo_epi16(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF);
+ const __m128i vprod12x89ABCDEF = _mm256_extracti128_si256(vprod12x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod12x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod12x89ABCDEF));
+
+ const __m256i vi13x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i13));
+ const __m256i vk13x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+
+ const __m256i vprod13x0123456789ABCDEF = _mm256_mullo_epi16(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF);
+ const __m128i vprod13x89ABCDEF = _mm256_extracti128_si256(vprod13x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod13x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod13x89ABCDEF));
+
+ const __m256i vi14x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i14));
+ const __m256i vk14x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(int8_t))));
+
+ const __m256i vprod14x0123456789ABCDEF = _mm256_mullo_epi16(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF);
+ const __m128i vprod14x89ABCDEF = _mm256_extracti128_si256(vprod14x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod14x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod14x89ABCDEF));
+
+ const __m256i vi15x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i15));
+ const __m256i vk15x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(int8_t))));
+
+ const __m256i vprod15x0123456789ABCDEF = _mm256_mullo_epi16(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF);
+ const __m128i vprod15x89ABCDEF = _mm256_extracti128_si256(vprod15x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod15x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod15x89ABCDEF));
+
+ const __m256i vi16x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i16));
+ const __m256i vk16x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(int8_t))));
+
+ const __m256i vprod16x0123456789ABCDEF = _mm256_mullo_epi16(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF);
+ const __m128i vprod16x89ABCDEF = _mm256_extracti128_si256(vprod16x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod16x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod16x89ABCDEF));
+
+ const __m256i vi17x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i17));
+ const __m256i vk17x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(int8_t))));
+
+ const __m256i vprod17x0123456789ABCDEF = _mm256_mullo_epi16(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF);
+ const __m128i vprod17x89ABCDEF = _mm256_extracti128_si256(vprod17x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod17x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod17x89ABCDEF));
+
+ const __m256i vi18x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i18));
+ const __m256i vk18x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(int8_t))));
+
+ const __m256i vprod18x0123456789ABCDEF = _mm256_mullo_epi16(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF);
+ const __m128i vprod18x89ABCDEF = _mm256_extracti128_si256(vprod18x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod18x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod18x89ABCDEF));
+
+ const __m256i vi19x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i19));
+ const __m256i vk19x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(int8_t))));
+
+ const __m256i vprod19x0123456789ABCDEF = _mm256_mullo_epi16(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF);
+ const __m128i vprod19x89ABCDEF = _mm256_extracti128_si256(vprod19x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod19x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod19x89ABCDEF));
+
+ const __m256i vi20x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i20));
+ const __m256i vk20x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(int8_t))));
+
+ const __m256i vprod20x0123456789ABCDEF = _mm256_mullo_epi16(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF);
+ const __m128i vprod20x89ABCDEF = _mm256_extracti128_si256(vprod20x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod20x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod20x89ABCDEF));
+
+ const __m256i vi21x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i21));
+ const __m256i vk21x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(int8_t))));
+
+ const __m256i vprod21x0123456789ABCDEF = _mm256_mullo_epi16(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF);
+ const __m128i vprod21x89ABCDEF = _mm256_extracti128_si256(vprod21x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod21x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod21x89ABCDEF));
+
+ const __m256i vi22x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i22));
+ const __m256i vk22x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(int8_t))));
+
+ const __m256i vprod22x0123456789ABCDEF = _mm256_mullo_epi16(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF);
+ const __m128i vprod22x89ABCDEF = _mm256_extracti128_si256(vprod22x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod22x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod22x89ABCDEF));
+
+ const __m256i vi23x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i23));
+ const __m256i vk23x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(int8_t))));
+
+ const __m256i vprod23x0123456789ABCDEF = _mm256_mullo_epi16(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF);
+ const __m128i vprod23x89ABCDEF = _mm256_extracti128_si256(vprod23x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod23x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod23x89ABCDEF));
+
+ const __m256i vi24x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i24));
+ const __m256i vk24x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(int8_t))));
+
+ const __m256i vprod24x0123456789ABCDEF = _mm256_mullo_epi16(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF);
+ const __m128i vprod24x89ABCDEF = _mm256_extracti128_si256(vprod24x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod24x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod24x89ABCDEF));
+
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t)));
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t) + 8 * sizeof(float)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+ vout0123456789ABCDEF = _mm_min_epi8(_mm_max_epi8(vout0123456789ABCDEF, voutput_min), voutput_max);
+
+ if (c & 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456789ABCDEF);
+ vout0123456789ABCDEF = _mm_unpackhi_epi64(vout0123456789ABCDEF, vout0123456789ABCDEF);
+ output += 8;
+ }
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456789ABCDEF);
+ vout0123456789ABCDEF = _mm_srli_epi64(vout0123456789ABCDEF, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456789ABCDEF, 0);
+ vout0123456789ABCDEF = _mm_srli_epi32(vout0123456789ABCDEF, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456789ABCDEF, 0);
+ output += 1;
+ }
+ }
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c b/src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
new file mode 100644
index 0000000..2464f55
--- /dev/null
+++ b/src/qc8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
@@ -0,0 +1,619 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ const int8_t* i9 = input[9];
+ assert(i9 != NULL);
+ if XNN_UNPREDICTABLE(i9 != zero) {
+ i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
+ }
+ const int8_t* i10 = input[10];
+ assert(i10 != NULL);
+ if XNN_UNPREDICTABLE(i10 != zero) {
+ i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
+ }
+ const int8_t* i11 = input[11];
+ assert(i11 != NULL);
+ if XNN_UNPREDICTABLE(i11 != zero) {
+ i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
+ }
+ const int8_t* i12 = input[12];
+ assert(i12 != NULL);
+ if XNN_UNPREDICTABLE(i12 != zero) {
+ i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
+ }
+ const int8_t* i13 = input[13];
+ assert(i13 != NULL);
+ if XNN_UNPREDICTABLE(i13 != zero) {
+ i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
+ }
+ const int8_t* i14 = input[14];
+ assert(i14 != NULL);
+ if XNN_UNPREDICTABLE(i14 != zero) {
+ i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
+ }
+ const int8_t* i15 = input[15];
+ assert(i15 != NULL);
+ if XNN_UNPREDICTABLE(i15 != zero) {
+ i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
+ }
+ const int8_t* i16 = input[16];
+ assert(i16 != NULL);
+ if XNN_UNPREDICTABLE(i16 != zero) {
+ i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
+ }
+ const int8_t* i17 = input[17];
+ assert(i17 != NULL);
+ if XNN_UNPREDICTABLE(i17 != zero) {
+ i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
+ }
+ const int8_t* i18 = input[18];
+ assert(i18 != NULL);
+ if XNN_UNPREDICTABLE(i18 != zero) {
+ i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
+ }
+ const int8_t* i19 = input[19];
+ assert(i19 != NULL);
+ if XNN_UNPREDICTABLE(i19 != zero) {
+ i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
+ }
+ const int8_t* i20 = input[20];
+ assert(i20 != NULL);
+ if XNN_UNPREDICTABLE(i20 != zero) {
+ i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
+ }
+ const int8_t* i21 = input[21];
+ assert(i21 != NULL);
+ if XNN_UNPREDICTABLE(i21 != zero) {
+ i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
+ }
+ const int8_t* i22 = input[22];
+ assert(i22 != NULL);
+ if XNN_UNPREDICTABLE(i22 != zero) {
+ i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
+ }
+ const int8_t* i23 = input[23];
+ assert(i23 != NULL);
+ if XNN_UNPREDICTABLE(i23 != zero) {
+ i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
+ }
+ const int8_t* i24 = input[24];
+ assert(i24 != NULL);
+ if XNN_UNPREDICTABLE(i24 != zero) {
+ i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ const __m256i vi0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
+ const __m256i vk0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+ i0 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi0x89ABCDEF, vk0x89ABCDEF));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ const __m256i vi1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
+ const __m256i vk1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+ i1 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi1x89ABCDEF, vk1x89ABCDEF));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ const __m256i vi2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
+ const __m256i vk2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+ i2 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi2x89ABCDEF, vk2x89ABCDEF));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ const __m256i vi3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
+ const __m256i vk3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+ i3 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi3x89ABCDEF, vk3x89ABCDEF));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
+ const __m256i vk4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t))));
+ i4 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ const __m256i vi5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
+ const __m256i vk5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t))));
+ i5 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi5x89ABCDEF, vk5x89ABCDEF));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ const __m256i vi6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
+ const __m256i vk6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t))));
+ i6 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi6x89ABCDEF, vk6x89ABCDEF));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ const __m256i vi7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 8)));
+ const __m256i vk7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t))));
+ i7 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi7x89ABCDEF, vk7x89ABCDEF));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ const __m256i vi8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 8)));
+ const __m256i vk8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t))));
+ i8 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi8x89ABCDEF, vk8x89ABCDEF));
+
+ const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
+ const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ const __m256i vi9x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i9 + 8)));
+ const __m256i vk9x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 152 * sizeof(int8_t))));
+ i9 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi9x89ABCDEF, vk9x89ABCDEF));
+
+ const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
+ const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ const __m256i vi10x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 8)));
+ const __m256i vk10x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 168 * sizeof(int8_t))));
+ i10 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi10x89ABCDEF, vk10x89ABCDEF));
+
+ const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
+ const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ const __m256i vi11x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i11 + 8)));
+ const __m256i vk11x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 184 * sizeof(int8_t))));
+ i11 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi11x89ABCDEF, vk11x89ABCDEF));
+
+ const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
+ const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ const __m256i vi12x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i12 + 8)));
+ const __m256i vk12x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 200 * sizeof(int8_t))));
+ i12 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi12x89ABCDEF, vk12x89ABCDEF));
+
+ const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
+ const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+ const __m256i vi13x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i13 + 8)));
+ const __m256i vk13x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 216 * sizeof(int8_t))));
+ i13 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi13x89ABCDEF, vk13x89ABCDEF));
+
+ const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
+ const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(int8_t))));
+ const __m256i vi14x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i14 + 8)));
+ const __m256i vk14x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 232 * sizeof(int8_t))));
+ i14 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi14x89ABCDEF, vk14x89ABCDEF));
+
+ const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
+ const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(int8_t))));
+ const __m256i vi15x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i15 + 8)));
+ const __m256i vk15x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 248 * sizeof(int8_t))));
+ i15 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi15x89ABCDEF, vk15x89ABCDEF));
+
+ const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
+ const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(int8_t))));
+ const __m256i vi16x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i16 + 8)));
+ const __m256i vk16x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 264 * sizeof(int8_t))));
+ i16 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi16x89ABCDEF, vk16x89ABCDEF));
+
+ const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
+ const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(int8_t))));
+ const __m256i vi17x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i17 + 8)));
+ const __m256i vk17x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 280 * sizeof(int8_t))));
+ i17 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi17x89ABCDEF, vk17x89ABCDEF));
+
+ const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
+ const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(int8_t))));
+ const __m256i vi18x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i18 + 8)));
+ const __m256i vk18x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 296 * sizeof(int8_t))));
+ i18 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi18x89ABCDEF, vk18x89ABCDEF));
+
+ const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
+ const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(int8_t))));
+ const __m256i vi19x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i19 + 8)));
+ const __m256i vk19x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 312 * sizeof(int8_t))));
+ i19 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi19x89ABCDEF, vk19x89ABCDEF));
+
+ const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
+ const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(int8_t))));
+ const __m256i vi20x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i20 + 8)));
+ const __m256i vk20x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 328 * sizeof(int8_t))));
+ i20 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi20x89ABCDEF, vk20x89ABCDEF));
+
+ const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
+ const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(int8_t))));
+ const __m256i vi21x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i21 + 8)));
+ const __m256i vk21x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 344 * sizeof(int8_t))));
+ i21 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi21x89ABCDEF, vk21x89ABCDEF));
+
+ const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
+ const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(int8_t))));
+ const __m256i vi22x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i22 + 8)));
+ const __m256i vk22x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 360 * sizeof(int8_t))));
+ i22 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi22x89ABCDEF, vk22x89ABCDEF));
+
+ const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
+ const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(int8_t))));
+ const __m256i vi23x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i23 + 8)));
+ const __m256i vk23x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 376 * sizeof(int8_t))));
+ i23 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi23x89ABCDEF, vk23x89ABCDEF));
+
+ const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
+ const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(int8_t))));
+ const __m256i vi24x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i24 + 8)));
+ const __m256i vk24x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 392 * sizeof(int8_t))));
+ i24 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi24x89ABCDEF, vk24x89ABCDEF));
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t));
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 16 * sizeof(int32_t));
+ do {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) k));
+ i0 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 16)));
+ i1 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 32)));
+ i2 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 48)));
+ i3 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 64)));
+ i4 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 80)));
+ i5 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 96)));
+ i6 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 112)));
+ i7 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 128)));
+ i8 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
+ const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 144)));
+ i9 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));
+
+ const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
+ const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 160)));
+ i10 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));
+
+ const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
+ const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 176)));
+ i11 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));
+
+ const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
+ const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 192)));
+ i12 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));
+
+ const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
+ const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 208)));
+ i13 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));
+
+ const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
+ const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 224)));
+ i14 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));
+
+ const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
+ const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 240)));
+ i15 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));
+
+ const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
+ const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 256)));
+ i16 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));
+
+ const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
+ const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 272)));
+ i17 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));
+
+ const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
+ const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 288)));
+ i18 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));
+
+ const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
+ const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 304)));
+ i19 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));
+
+ const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
+ const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 320)));
+ i20 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));
+
+ const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
+ const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 336)));
+ i21 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));
+
+ const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
+ const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 352)));
+ i22 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));
+
+ const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
+ const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 368)));
+ i23 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));
+
+ const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
+ const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 384)));
+ i24 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
+
+ k += 8;
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c b/src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
new file mode 100644
index 0000000..c842f7b
--- /dev/null
+++ b/src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
@@ -0,0 +1,319 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up16x9__avx2_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+
+
+ const __m256i vi0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i0));
+ const __m256i vk0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ i0 += 16;
+
+ const __m256i vprod0x0123456789ABCDEF = _mm256_mullo_epi16(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF);
+ const __m128i vprod0x89ABCDEF = _mm256_extracti128_si256(vprod0x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod0x89ABCDEF));
+
+ const __m256i vi1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i1));
+ const __m256i vk1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ i1 += 16;
+
+ const __m256i vprod1x0123456789ABCDEF = _mm256_mullo_epi16(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
+ const __m128i vprod1x89ABCDEF = _mm256_extracti128_si256(vprod1x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod1x89ABCDEF));
+
+ const __m256i vi2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i2));
+ const __m256i vk2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ i2 += 16;
+
+ const __m256i vprod2x0123456789ABCDEF = _mm256_mullo_epi16(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF);
+ const __m128i vprod2x89ABCDEF = _mm256_extracti128_si256(vprod2x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod2x89ABCDEF));
+
+ const __m256i vi3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i3));
+ const __m256i vk3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ i3 += 16;
+
+ const __m256i vprod3x0123456789ABCDEF = _mm256_mullo_epi16(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF);
+ const __m128i vprod3x89ABCDEF = _mm256_extracti128_si256(vprod3x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod3x89ABCDEF));
+
+ const __m256i vi4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i4));
+ const __m256i vk4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ i4 += 16;
+
+ const __m256i vprod4x0123456789ABCDEF = _mm256_mullo_epi16(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF);
+ const __m128i vprod4x89ABCDEF = _mm256_extracti128_si256(vprod4x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod4x89ABCDEF));
+
+ const __m256i vi5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i5));
+ const __m256i vk5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ i5 += 16;
+
+ const __m256i vprod5x0123456789ABCDEF = _mm256_mullo_epi16(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF);
+ const __m128i vprod5x89ABCDEF = _mm256_extracti128_si256(vprod5x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod5x89ABCDEF));
+
+ const __m256i vi6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i6));
+ const __m256i vk6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ i6 += 16;
+
+ const __m256i vprod6x0123456789ABCDEF = _mm256_mullo_epi16(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF);
+ const __m128i vprod6x89ABCDEF = _mm256_extracti128_si256(vprod6x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod6x89ABCDEF));
+
+ const __m256i vi7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i7));
+ const __m256i vk7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ i7 += 16;
+
+ const __m256i vprod7x0123456789ABCDEF = _mm256_mullo_epi16(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF);
+ const __m128i vprod7x89ABCDEF = _mm256_extracti128_si256(vprod7x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod7x89ABCDEF));
+
+ const __m256i vi8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i8));
+ const __m256i vk8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ i8 += 16;
+
+ const __m256i vprod8x0123456789ABCDEF = _mm256_mullo_epi16(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF);
+ const __m128i vprod8x89ABCDEF = _mm256_extracti128_si256(vprod8x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ const __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+
+
+ const __m256i vi0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i0));
+ const __m256i vk0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+
+ const __m256i vprod0x0123456789ABCDEF = _mm256_mullo_epi16(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF);
+ const __m128i vprod0x89ABCDEF = _mm256_extracti128_si256(vprod0x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod0x89ABCDEF));
+
+ const __m256i vi1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i1));
+ const __m256i vk1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+
+ const __m256i vprod1x0123456789ABCDEF = _mm256_mullo_epi16(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
+ const __m128i vprod1x89ABCDEF = _mm256_extracti128_si256(vprod1x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod1x89ABCDEF));
+
+ const __m256i vi2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i2));
+ const __m256i vk2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+
+ const __m256i vprod2x0123456789ABCDEF = _mm256_mullo_epi16(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF);
+ const __m128i vprod2x89ABCDEF = _mm256_extracti128_si256(vprod2x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod2x89ABCDEF));
+
+ const __m256i vi3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i3));
+ const __m256i vk3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+
+ const __m256i vprod3x0123456789ABCDEF = _mm256_mullo_epi16(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF);
+ const __m128i vprod3x89ABCDEF = _mm256_extracti128_si256(vprod3x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod3x89ABCDEF));
+
+ const __m256i vi4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i4));
+ const __m256i vk4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+
+ const __m256i vprod4x0123456789ABCDEF = _mm256_mullo_epi16(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF);
+ const __m128i vprod4x89ABCDEF = _mm256_extracti128_si256(vprod4x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod4x89ABCDEF));
+
+ const __m256i vi5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i5));
+ const __m256i vk5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+
+ const __m256i vprod5x0123456789ABCDEF = _mm256_mullo_epi16(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF);
+ const __m128i vprod5x89ABCDEF = _mm256_extracti128_si256(vprod5x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod5x89ABCDEF));
+
+ const __m256i vi6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i6));
+ const __m256i vk6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+
+ const __m256i vprod6x0123456789ABCDEF = _mm256_mullo_epi16(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF);
+ const __m128i vprod6x89ABCDEF = _mm256_extracti128_si256(vprod6x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod6x89ABCDEF));
+
+ const __m256i vi7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i7));
+ const __m256i vk7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+
+ const __m256i vprod7x0123456789ABCDEF = _mm256_mullo_epi16(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF);
+ const __m128i vprod7x89ABCDEF = _mm256_extracti128_si256(vprod7x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod7x89ABCDEF));
+
+ const __m256i vi8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i8));
+ const __m256i vk8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+
+ const __m256i vprod8x0123456789ABCDEF = _mm256_mullo_epi16(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF);
+ const __m128i vprod8x89ABCDEF = _mm256_extracti128_si256(vprod8x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
+
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t)));
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t) + 8 * sizeof(float)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+ vout0123456789ABCDEF = _mm_min_epi8(_mm_max_epi8(vout0123456789ABCDEF, voutput_min), voutput_max);
+
+ if (c & 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456789ABCDEF);
+ vout0123456789ABCDEF = _mm_unpackhi_epi64(vout0123456789ABCDEF, vout0123456789ABCDEF);
+ output += 8;
+ }
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456789ABCDEF);
+ vout0123456789ABCDEF = _mm_srli_epi64(vout0123456789ABCDEF, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456789ABCDEF, 0);
+ vout0123456789ABCDEF = _mm_srli_epi32(vout0123456789ABCDEF, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456789ABCDEF, 0);
+ output += 1;
+ }
+ }
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
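
All of these kernels retire a partial channel tile with the same branch cascade: the clamped
result sits in one SSE register, and groups of 8/4/2/1 lanes are peeled off with progressively
narrower stores, shifting the register right after each store so lane 0 always holds the next
unstored channel. A minimal scalar sketch of the same idea, assuming a hypothetical int8 buffer
`v[16]` of clamped lanes; `store_tail`, `v`, `out`, and `c` are illustrative stand-ins, not
identifiers from this patch:

  #include <stddef.h>
  #include <stdint.h>
  #include <string.h>

  // Hedged sketch of the tail-store cascade used by the kernels above.
  static void store_tail(int8_t* out, const int8_t v[16], size_t c) {
    size_t pos = 0;
    if (c & 8) { memcpy(out, v + pos, 8); out += 8; pos += 8; }  // 64-bit store
    if (c & 4) { memcpy(out, v + pos, 4); out += 4; pos += 4; }  // 32-bit store
    if (c & 2) { memcpy(out, v + pos, 2); out += 2; pos += 2; }  // 16-bit store
    if (c & 1) { *out = v[pos]; }                                // final byte
  }

The vector version replaces the `pos` bookkeeping with `_mm_srli_epi64`/`_mm_srli_epi32`, which
is why each `if (c & N)` branch above is paired with a shift of the output register.
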
diff --git a/src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c b/src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
new file mode 100644
index 0000000..b26e394
--- /dev/null
+++ b/src/qc8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
@@ -0,0 +1,299 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up16x9__avx2_mul32(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ const __m256i vi0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
+ const __m256i vk0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+ i0 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi0x89ABCDEF, vk0x89ABCDEF));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ const __m256i vi1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
+ const __m256i vk1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+ i1 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi1x89ABCDEF, vk1x89ABCDEF));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ const __m256i vi2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
+ const __m256i vk2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+ i2 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi2x89ABCDEF, vk2x89ABCDEF));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ const __m256i vi3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
+ const __m256i vk3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+ i3 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi3x89ABCDEF, vk3x89ABCDEF));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
+ const __m256i vk4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t))));
+ i4 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ const __m256i vi5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
+ const __m256i vk5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t))));
+ i5 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi5x89ABCDEF, vk5x89ABCDEF));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ const __m256i vi6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
+ const __m256i vk6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t))));
+ i6 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi6x89ABCDEF, vk6x89ABCDEF));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ const __m256i vi7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 8)));
+ const __m256i vk7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t))));
+ i7 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi7x89ABCDEF, vk7x89ABCDEF));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ const __m256i vi8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 8)));
+ const __m256i vk8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t))));
+ i8 += 16;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi8x89ABCDEF, vk8x89ABCDEF));
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 16 * sizeof(int32_t));
+ do {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) k));
+ i0 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 16)));
+ i1 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 32)));
+ i2 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 48)));
+ i3 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 64)));
+ i4 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 80)));
+ i5 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 96)));
+ i6 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 112)));
+ i7 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 128)));
+ i8 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ k += 8;
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
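
The fp32 requantization in these QC8 kernels converts the int32 accumulator to float, multiplies
by a per-channel scale loaded from the space packed after the kernel bytes (the
`16 * sizeof(int32_t) + 144 * sizeof(int8_t)` offsets above), and converts back with
`_mm256_cvtps_epi32`, which rounds to nearest-even under the default MXCSR; only then do the
saturating packs add the output zero point and clamp. A hedged scalar reference for one channel,
with illustrative names (`requantize_fp32`, `acc`, `scale`, `zero_point`, `qmin`, `qmax`); the
intermediate saturating 16-bit packs of the vector code are folded into the final clamp here,
which gives the same result for in-range parameters:

  #include <math.h>
  #include <stdint.h>

  // Hedged scalar reference for the fp32 requantization path, one channel.
  static int8_t requantize_fp32(int32_t acc, float scale,
                                int16_t zero_point, int8_t qmin, int8_t qmax) {
    const float scaled = (float) acc * scale;  // apply per-channel scale
    long q = lrintf(scaled);                   // round-to-nearest-even, matching
                                               // _mm256_cvtps_epi32 with default MXCSR
    q += zero_point;                           // add output zero point
    if (q < qmin) q = qmin;                    // clamp to [qmin, qmax]
    if (q > qmax) q = qmax;
    return (int8_t) q;
  }

This is what distinguishes the fp32 variants from the gemmlowp ones: the rescale is a single
float multiply per channel instead of a fixed-point multiply-shift pair.
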
diff --git a/src/qc8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c b/src/qc8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
new file mode 100644
index 0000000..8d3f7ef
--- /dev/null
+++ b/src/qc8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
@@ -0,0 +1,704 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ const int8_t* i9 = input[9];
+ assert(i9 != NULL);
+ if XNN_UNPREDICTABLE(i9 != zero) {
+ i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
+ }
+ const int8_t* i10 = input[10];
+ assert(i10 != NULL);
+ if XNN_UNPREDICTABLE(i10 != zero) {
+ i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
+ }
+ const int8_t* i11 = input[11];
+ assert(i11 != NULL);
+ if XNN_UNPREDICTABLE(i11 != zero) {
+ i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
+ }
+ const int8_t* i12 = input[12];
+ assert(i12 != NULL);
+ if XNN_UNPREDICTABLE(i12 != zero) {
+ i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
+ }
+ const int8_t* i13 = input[13];
+ assert(i13 != NULL);
+ if XNN_UNPREDICTABLE(i13 != zero) {
+ i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
+ }
+ const int8_t* i14 = input[14];
+ assert(i14 != NULL);
+ if XNN_UNPREDICTABLE(i14 != zero) {
+ i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
+ }
+ const int8_t* i15 = input[15];
+ assert(i15 != NULL);
+ if XNN_UNPREDICTABLE(i15 != zero) {
+ i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
+ }
+ const int8_t* i16 = input[16];
+ assert(i16 != NULL);
+ if XNN_UNPREDICTABLE(i16 != zero) {
+ i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
+ }
+ const int8_t* i17 = input[17];
+ assert(i17 != NULL);
+ if XNN_UNPREDICTABLE(i17 != zero) {
+ i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
+ }
+ const int8_t* i18 = input[18];
+ assert(i18 != NULL);
+ if XNN_UNPREDICTABLE(i18 != zero) {
+ i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
+ }
+ const int8_t* i19 = input[19];
+ assert(i19 != NULL);
+ if XNN_UNPREDICTABLE(i19 != zero) {
+ i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
+ }
+ const int8_t* i20 = input[20];
+ assert(i20 != NULL);
+ if XNN_UNPREDICTABLE(i20 != zero) {
+ i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
+ }
+ const int8_t* i21 = input[21];
+ assert(i21 != NULL);
+ if XNN_UNPREDICTABLE(i21 != zero) {
+ i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
+ }
+ const int8_t* i22 = input[22];
+ assert(i22 != NULL);
+ if XNN_UNPREDICTABLE(i22 != zero) {
+ i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
+ }
+ const int8_t* i23 = input[23];
+ assert(i23 != NULL);
+ if XNN_UNPREDICTABLE(i23 != zero) {
+ i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
+ }
+ const int8_t* i24 = input[24];
+ assert(i24 != NULL);
+ if XNN_UNPREDICTABLE(i24 != zero) {
+ i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 24; c -= 24) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m256i vaccGHIJKLMN = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ const __m256i vi0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
+ const __m256i vk0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+ const __m256i vi0xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
+ const __m256i vk0xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ i0 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi0x89ABCDEF, vk0x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi0xGHIJKLMN, vk0xGHIJKLMN));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+ const __m256i vi1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
+ const __m256i vk1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ const __m256i vi1xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
+ const __m256i vk1xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+ i1 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi1x89ABCDEF, vk1x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi1xGHIJKLMN, vk1xGHIJKLMN));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ const __m256i vi2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
+ const __m256i vk2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+ const __m256i vi2xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
+ const __m256i vk2xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ i2 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi2x89ABCDEF, vk2x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi2xGHIJKLMN, vk2xGHIJKLMN));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t))));
+ const __m256i vi3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
+ const __m256i vk3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ const __m256i vi3xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
+ const __m256i vk3xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t))));
+ i3 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi3x89ABCDEF, vk3x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi3xGHIJKLMN, vk3xGHIJKLMN));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
+ const __m256i vk4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t))));
+ const __m256i vi4xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
+ const __m256i vk4xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ i4 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi4xGHIJKLMN, vk4xGHIJKLMN));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t))));
+ const __m256i vi5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
+ const __m256i vk5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ const __m256i vi5xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
+ const __m256i vk5xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t))));
+ i5 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi5x89ABCDEF, vk5x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi5xGHIJKLMN, vk5xGHIJKLMN));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ const __m256i vi6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
+ const __m256i vk6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t))));
+ const __m256i vi6xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
+ const __m256i vk6xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ i6 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi6x89ABCDEF, vk6x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi6xGHIJKLMN, vk6xGHIJKLMN));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t))));
+ const __m256i vi7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 8)));
+ const __m256i vk7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ const __m256i vi7xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 16)));
+ const __m256i vk7xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t))));
+ i7 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi7x89ABCDEF, vk7x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi7xGHIJKLMN, vk7xGHIJKLMN));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ const __m256i vi8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 8)));
+ const __m256i vk8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t))));
+ const __m256i vi8xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 16)));
+ const __m256i vk8xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+ i8 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi8x89ABCDEF, vk8x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi8xGHIJKLMN, vk8xGHIJKLMN));
+
+ const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
+ const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t))));
+ const __m256i vi9x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i9 + 8)));
+ const __m256i vk9x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 224 * sizeof(int8_t))));
+ const __m256i vi9xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i9 + 16)));
+ const __m256i vk9xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 232 * sizeof(int8_t))));
+ i9 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi9x89ABCDEF, vk9x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi9xGHIJKLMN, vk9xGHIJKLMN));
+
+ const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
+ const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 240 * sizeof(int8_t))));
+ const __m256i vi10x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 8)));
+ const __m256i vk10x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 248 * sizeof(int8_t))));
+ const __m256i vi10xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 16)));
+ const __m256i vk10xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 256 * sizeof(int8_t))));
+ i10 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi10x89ABCDEF, vk10x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi10xGHIJKLMN, vk10xGHIJKLMN));
+
+ const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
+ const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 264 * sizeof(int8_t))));
+ const __m256i vi11x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i11 + 8)));
+ const __m256i vk11x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 272 * sizeof(int8_t))));
+ const __m256i vi11xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i11 + 16)));
+ const __m256i vk11xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 280 * sizeof(int8_t))));
+ i11 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi11x89ABCDEF, vk11x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi11xGHIJKLMN, vk11xGHIJKLMN));
+
+ const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
+ const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 288 * sizeof(int8_t))));
+ const __m256i vi12x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i12 + 8)));
+ const __m256i vk12x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 296 * sizeof(int8_t))));
+ const __m256i vi12xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i12 + 16)));
+ const __m256i vk12xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 304 * sizeof(int8_t))));
+ i12 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi12x89ABCDEF, vk12x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi12xGHIJKLMN, vk12xGHIJKLMN));
+
+ const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
+ const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 312 * sizeof(int8_t))));
+ const __m256i vi13x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i13 + 8)));
+ const __m256i vk13x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 320 * sizeof(int8_t))));
+ const __m256i vi13xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i13 + 16)));
+ const __m256i vk13xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 328 * sizeof(int8_t))));
+ i13 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi13x89ABCDEF, vk13x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi13xGHIJKLMN, vk13xGHIJKLMN));
+
+ const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
+ const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 336 * sizeof(int8_t))));
+ const __m256i vi14x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i14 + 8)));
+ const __m256i vk14x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 344 * sizeof(int8_t))));
+ const __m256i vi14xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i14 + 16)));
+ const __m256i vk14xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 352 * sizeof(int8_t))));
+ i14 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi14x89ABCDEF, vk14x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi14xGHIJKLMN, vk14xGHIJKLMN));
+
+ const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
+ const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 360 * sizeof(int8_t))));
+ const __m256i vi15x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i15 + 8)));
+ const __m256i vk15x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 368 * sizeof(int8_t))));
+ const __m256i vi15xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i15 + 16)));
+ const __m256i vk15xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 376 * sizeof(int8_t))));
+ i15 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi15x89ABCDEF, vk15x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi15xGHIJKLMN, vk15xGHIJKLMN));
+
+ const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
+ const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 384 * sizeof(int8_t))));
+ const __m256i vi16x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i16 + 8)));
+ const __m256i vk16x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 392 * sizeof(int8_t))));
+ const __m256i vi16xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i16 + 16)));
+ const __m256i vk16xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 400 * sizeof(int8_t))));
+ i16 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi16x89ABCDEF, vk16x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi16xGHIJKLMN, vk16xGHIJKLMN));
+
+ const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
+ const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 408 * sizeof(int8_t))));
+ const __m256i vi17x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i17 + 8)));
+ const __m256i vk17x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 416 * sizeof(int8_t))));
+ const __m256i vi17xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i17 + 16)));
+ const __m256i vk17xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 424 * sizeof(int8_t))));
+ i17 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi17x89ABCDEF, vk17x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi17xGHIJKLMN, vk17xGHIJKLMN));
+
+ const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
+ const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 432 * sizeof(int8_t))));
+ const __m256i vi18x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i18 + 8)));
+ const __m256i vk18x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 440 * sizeof(int8_t))));
+ const __m256i vi18xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i18 + 16)));
+ const __m256i vk18xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 448 * sizeof(int8_t))));
+ i18 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi18x89ABCDEF, vk18x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi18xGHIJKLMN, vk18xGHIJKLMN));
+
+ const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
+ const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 456 * sizeof(int8_t))));
+ const __m256i vi19x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i19 + 8)));
+ const __m256i vk19x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 464 * sizeof(int8_t))));
+ const __m256i vi19xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i19 + 16)));
+ const __m256i vk19xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 472 * sizeof(int8_t))));
+ i19 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi19x89ABCDEF, vk19x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi19xGHIJKLMN, vk19xGHIJKLMN));
+
+ const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
+ const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 480 * sizeof(int8_t))));
+ const __m256i vi20x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i20 + 8)));
+ const __m256i vk20x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 488 * sizeof(int8_t))));
+ const __m256i vi20xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i20 + 16)));
+ const __m256i vk20xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 496 * sizeof(int8_t))));
+ i20 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi20x89ABCDEF, vk20x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi20xGHIJKLMN, vk20xGHIJKLMN));
+
+ const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
+ const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 504 * sizeof(int8_t))));
+ const __m256i vi21x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i21 + 8)));
+ const __m256i vk21x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 512 * sizeof(int8_t))));
+ const __m256i vi21xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i21 + 16)));
+ const __m256i vk21xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 520 * sizeof(int8_t))));
+ i21 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi21x89ABCDEF, vk21x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi21xGHIJKLMN, vk21xGHIJKLMN));
+
+ const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
+ const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 528 * sizeof(int8_t))));
+ const __m256i vi22x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i22 + 8)));
+ const __m256i vk22x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 536 * sizeof(int8_t))));
+ const __m256i vi22xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i22 + 16)));
+ const __m256i vk22xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 544 * sizeof(int8_t))));
+ i22 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi22x89ABCDEF, vk22x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi22xGHIJKLMN, vk22xGHIJKLMN));
+
+ const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
+ const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 552 * sizeof(int8_t))));
+ const __m256i vi23x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i23 + 8)));
+ const __m256i vk23x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 560 * sizeof(int8_t))));
+ const __m256i vi23xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i23 + 16)));
+ const __m256i vk23xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 568 * sizeof(int8_t))));
+ i23 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi23x89ABCDEF, vk23x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi23xGHIJKLMN, vk23xGHIJKLMN));
+
+ const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
+ const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 576 * sizeof(int8_t))));
+ const __m256i vi24x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i24 + 8)));
+ const __m256i vk24x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 584 * sizeof(int8_t))));
+ const __m256i vi24xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i24 + 16)));
+ const __m256i vk24xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 592 * sizeof(int8_t))));
+ i24 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi24x89ABCDEF, vk24x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi24xGHIJKLMN, vk24xGHIJKLMN));
+
+ w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 600 * sizeof(int8_t));
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+ __m256 vscaledGHIJKLMN = _mm256_cvtepi32_ps(vaccGHIJKLMN);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ const __m256 vscaleGHIJKLMN = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 24 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+ vscaledGHIJKLMN = _mm256_mul_ps(vscaledGHIJKLMN, vscaleGHIJKLMN);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+ vaccGHIJKLMN = _mm256_cvtps_epi32(vscaledGHIJKLMN);
+
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extracti128_si256(vaccGHIJKLMN, 1)), _mm256_castsi256_si128(voutput_zero_point));
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+ voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
+ voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+ output += 24;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 24 * sizeof(int32_t));
+ do {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) k));
+ i0 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 24)));
+ i1 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 48)));
+ i2 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 72)));
+ i3 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 96)));
+ i4 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 120)));
+ i5 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 144)));
+ i6 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 168)));
+ i7 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 192)));
+ i8 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
+ const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 216)));
+ i9 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));
+
+ const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
+ const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 240)));
+ i10 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));
+
+ const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
+ const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 264)));
+ i11 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));
+
+ const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
+ const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 288)));
+ i12 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));
+
+ const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
+ const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 312)));
+ i13 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));
+
+ const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
+ const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 336)));
+ i14 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));
+
+ const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
+ const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 360)));
+ i15 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));
+
+ const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
+ const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 384)));
+ i16 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));
+
+ const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
+ const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 408)));
+ i17 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));
+
+ const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
+ const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 432)));
+ i18 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));
+
+ const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
+ const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 456)));
+ i19 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));
+
+ const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
+ const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 480)));
+ i20 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));
+
+ const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
+ const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 504)));
+ i21 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));
+
+ const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
+ const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 528)));
+ i22 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));
+
+ const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
+ const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 552)));
+ i23 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));
+
+ const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
+ const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 576)));
+ i24 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
+
+ k += 8;
+
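+ // Requantize in FP32: the per-channel scales live at a fixed offset past the 24 int32 biases and the 600 packed taps; advancing w by 8 int32s per group (below) also steps this load to the next 8 scales.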
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 24 * sizeof(int32_t) + 600 * sizeof(int8_t)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+
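+ // Store 8 outputs while at least 8 channels remain; the last 1-7 channels use progressively narrower stores.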
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qc8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c b/src/qc8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
new file mode 100644
index 0000000..02ccfa8
--- /dev/null
+++ b/src/qc8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
@@ -0,0 +1,336 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up24x9__avx2_mul32(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
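+ // Set up the 9 input row pointers for this output pixel; rows redirected to the zero buffer skip the input_offset adjustment.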
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 24; c -= 24) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m256i vaccGHIJKLMN = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+
+
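+ // Main loop: 24 channels per iteration. The int32 biases were loaded above; the packed int8 taps follow in w, 24 bytes per kernel row.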
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ const __m256i vi0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
+ const __m256i vk0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+ const __m256i vi0xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
+ const __m256i vk0xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ i0 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi0x89ABCDEF, vk0x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi0xGHIJKLMN, vk0xGHIJKLMN));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+ const __m256i vi1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
+ const __m256i vk1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ const __m256i vi1xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
+ const __m256i vk1xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+ i1 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi1x89ABCDEF, vk1x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi1xGHIJKLMN, vk1xGHIJKLMN));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ const __m256i vi2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
+ const __m256i vk2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+ const __m256i vi2xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
+ const __m256i vk2xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ i2 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi2x89ABCDEF, vk2x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi2xGHIJKLMN, vk2xGHIJKLMN));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t))));
+ const __m256i vi3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
+ const __m256i vk3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ const __m256i vi3xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
+ const __m256i vk3xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t))));
+ i3 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi3x89ABCDEF, vk3x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi3xGHIJKLMN, vk3xGHIJKLMN));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
+ const __m256i vk4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t))));
+ const __m256i vi4xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
+ const __m256i vk4xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ i4 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi4xGHIJKLMN, vk4xGHIJKLMN));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t))));
+ const __m256i vi5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
+ const __m256i vk5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ const __m256i vi5xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
+ const __m256i vk5xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t))));
+ i5 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi5x89ABCDEF, vk5x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi5xGHIJKLMN, vk5xGHIJKLMN));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ const __m256i vi6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
+ const __m256i vk6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t))));
+ const __m256i vi6xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
+ const __m256i vk6xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ i6 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi6x89ABCDEF, vk6x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi6xGHIJKLMN, vk6xGHIJKLMN));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t))));
+ const __m256i vi7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 8)));
+ const __m256i vk7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ const __m256i vi7xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 16)));
+ const __m256i vk7xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t))));
+ i7 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi7x89ABCDEF, vk7x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi7xGHIJKLMN, vk7xGHIJKLMN));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ const __m256i vi8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 8)));
+ const __m256i vk8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t))));
+ const __m256i vi8xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 16)));
+ const __m256i vk8xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+ i8 += 24;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi8x89ABCDEF, vk8x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi8xGHIJKLMN, vk8xGHIJKLMN));
+
+ w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t));
+
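+ // FP32 requantization: w now points at the 24 per-channel scale factors packed immediately after the kernel taps.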
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+ __m256 vscaledGHIJKLMN = _mm256_cvtepi32_ps(vaccGHIJKLMN);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ const __m256 vscaleGHIJKLMN = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 24 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+ vscaledGHIJKLMN = _mm256_mul_ps(vscaledGHIJKLMN, vscaleGHIJKLMN);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+ vaccGHIJKLMN = _mm256_cvtps_epi32(vscaledGHIJKLMN);
+
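+ // _mm256_packs_epi32 interleaves the 128-bit lanes (0123 89AB | 4567 CDEF); the _MM_SHUFFLE(3, 1, 2, 0) below restores channel order after the 16->8 pack.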
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vaccGHIJKLMN), _mm256_extracti128_si256(vaccGHIJKLMN, 1)), _mm256_castsi256_si128(voutput_zero_point));
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+ voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
+ voutGHIJKLMNGHIJKLMN = _mm_min_epi8(voutGHIJKLMNGHIJKLMN, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+ output += 24;
+ }
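+ // Remainder: handle the last 1-23 channels in groups of 8; k walks the taps at a stride of 24 bytes per kernel row.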
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 24 * sizeof(int32_t));
+ do {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) k));
+ i0 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 24)));
+ i1 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 48)));
+ i2 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 72)));
+ i3 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 96)));
+ i4 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 120)));
+ i5 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 144)));
+ i6 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 168)));
+ i7 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 192)));
+ i8 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ k += 8;
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c b/src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
new file mode 100644
index 0000000..2db593a
--- /dev/null
+++ b/src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
@@ -0,0 +1,871 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
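+ // 25-tap variant: gather the input row pointers i0-i24 up front; rows pointing at the zero buffer skip the input_offset adjustment.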
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ const int8_t* i9 = input[9];
+ assert(i9 != NULL);
+ if XNN_UNPREDICTABLE(i9 != zero) {
+ i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
+ }
+ const int8_t* i10 = input[10];
+ assert(i10 != NULL);
+ if XNN_UNPREDICTABLE(i10 != zero) {
+ i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
+ }
+ const int8_t* i11 = input[11];
+ assert(i11 != NULL);
+ if XNN_UNPREDICTABLE(i11 != zero) {
+ i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
+ }
+ const int8_t* i12 = input[12];
+ assert(i12 != NULL);
+ if XNN_UNPREDICTABLE(i12 != zero) {
+ i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
+ }
+ const int8_t* i13 = input[13];
+ assert(i13 != NULL);
+ if XNN_UNPREDICTABLE(i13 != zero) {
+ i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
+ }
+ const int8_t* i14 = input[14];
+ assert(i14 != NULL);
+ if XNN_UNPREDICTABLE(i14 != zero) {
+ i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
+ }
+ const int8_t* i15 = input[15];
+ assert(i15 != NULL);
+ if XNN_UNPREDICTABLE(i15 != zero) {
+ i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
+ }
+ const int8_t* i16 = input[16];
+ assert(i16 != NULL);
+ if XNN_UNPREDICTABLE(i16 != zero) {
+ i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
+ }
+ const int8_t* i17 = input[17];
+ assert(i17 != NULL);
+ if XNN_UNPREDICTABLE(i17 != zero) {
+ i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
+ }
+ const int8_t* i18 = input[18];
+ assert(i18 != NULL);
+ if XNN_UNPREDICTABLE(i18 != zero) {
+ i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
+ }
+ const int8_t* i19 = input[19];
+ assert(i19 != NULL);
+ if XNN_UNPREDICTABLE(i19 != zero) {
+ i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
+ }
+ const int8_t* i20 = input[20];
+ assert(i20 != NULL);
+ if XNN_UNPREDICTABLE(i20 != zero) {
+ i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
+ }
+ const int8_t* i21 = input[21];
+ assert(i21 != NULL);
+ if XNN_UNPREDICTABLE(i21 != zero) {
+ i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
+ }
+ const int8_t* i22 = input[22];
+ assert(i22 != NULL);
+ if XNN_UNPREDICTABLE(i22 != zero) {
+ i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
+ }
+ const int8_t* i23 = input[23];
+ assert(i23 != NULL);
+ if XNN_UNPREDICTABLE(i23 != zero) {
+ i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
+ }
+ const int8_t* i24 = input[24];
+ assert(i24 != NULL);
+ if XNN_UNPREDICTABLE(i24 != zero) {
+ i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 32; c -= 32) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m256i vaccGHIJKLMN = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+ __m256i vaccOPQRSTUV = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 24 * sizeof(int32_t)));
+
+
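+ // mul16 variant: products of sign-extended int8 values fit in int16, so multiply with _mm256_mullo_epi16 and sign-extend each 128-bit half of the products to int32 before accumulating.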
+ const __m256i vi0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i0));
+ const __m256i vk0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ const __m256i vi0xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i0 + 16)));
+ const __m256i vk0xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ i0 += 32;
+
+ const __m256i vprod0x0123456789ABCDEF = _mm256_mullo_epi16(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF);
+ const __m128i vprod0x89ABCDEF = _mm256_extracti128_si256(vprod0x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod0x89ABCDEF));
+ const __m256i vprod0xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV);
+ const __m128i vprod0xOPQRSTUV = _mm256_extracti128_si256(vprod0xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod0xOPQRSTUV));
+
+ const __m256i vi1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i1));
+ const __m256i vk1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ const __m256i vi1xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i1 + 16)));
+ const __m256i vk1xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ i1 += 32;
+
+ const __m256i vprod1x0123456789ABCDEF = _mm256_mullo_epi16(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
+ const __m128i vprod1x89ABCDEF = _mm256_extracti128_si256(vprod1x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod1x89ABCDEF));
+ const __m256i vprod1xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV);
+ const __m128i vprod1xOPQRSTUV = _mm256_extracti128_si256(vprod1xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod1xOPQRSTUV));
+
+ const __m256i vi2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i2));
+ const __m256i vk2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ const __m256i vi2xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i2 + 16)));
+ const __m256i vk2xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ i2 += 32;
+
+ const __m256i vprod2x0123456789ABCDEF = _mm256_mullo_epi16(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF);
+ const __m128i vprod2x89ABCDEF = _mm256_extracti128_si256(vprod2x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod2x89ABCDEF));
+ const __m256i vprod2xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV);
+ const __m128i vprod2xOPQRSTUV = _mm256_extracti128_si256(vprod2xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod2xOPQRSTUV));
+
+ const __m256i vi3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i3));
+ const __m256i vk3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ const __m256i vi3xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i3 + 16)));
+ const __m256i vk3xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ i3 += 32;
+
+ const __m256i vprod3x0123456789ABCDEF = _mm256_mullo_epi16(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF);
+ const __m128i vprod3x89ABCDEF = _mm256_extracti128_si256(vprod3x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod3x89ABCDEF));
+ const __m256i vprod3xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV);
+ const __m128i vprod3xOPQRSTUV = _mm256_extracti128_si256(vprod3xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod3xOPQRSTUV));
+
+ const __m256i vi4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i4));
+ const __m256i vk4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ const __m256i vi4xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i4 + 16)));
+ const __m256i vk4xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ i4 += 32;
+
+ const __m256i vprod4x0123456789ABCDEF = _mm256_mullo_epi16(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF);
+ const __m128i vprod4x89ABCDEF = _mm256_extracti128_si256(vprod4x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod4x89ABCDEF));
+ const __m256i vprod4xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi4xGHIJKLMNOPQRSTUV, vk4xGHIJKLMNOPQRSTUV);
+ const __m128i vprod4xOPQRSTUV = _mm256_extracti128_si256(vprod4xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod4xOPQRSTUV));
+
+ const __m256i vi5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i5));
+ const __m256i vk5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ const __m256i vi5xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i5 + 16)));
+ const __m256i vk5xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ i5 += 32;
+
+ const __m256i vprod5x0123456789ABCDEF = _mm256_mullo_epi16(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF);
+ const __m128i vprod5x89ABCDEF = _mm256_extracti128_si256(vprod5x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod5x89ABCDEF));
+ const __m256i vprod5xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi5xGHIJKLMNOPQRSTUV, vk5xGHIJKLMNOPQRSTUV);
+ const __m128i vprod5xOPQRSTUV = _mm256_extracti128_si256(vprod5xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod5xOPQRSTUV));
+
+ const __m256i vi6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i6));
+ const __m256i vk6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ const __m256i vi6xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i6 + 16)));
+ const __m256i vk6xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+ i6 += 32;
+
+ const __m256i vprod6x0123456789ABCDEF = _mm256_mullo_epi16(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF);
+ const __m128i vprod6x89ABCDEF = _mm256_extracti128_si256(vprod6x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod6x89ABCDEF));
+ const __m256i vprod6xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi6xGHIJKLMNOPQRSTUV, vk6xGHIJKLMNOPQRSTUV);
+ const __m128i vprod6xOPQRSTUV = _mm256_extracti128_si256(vprod6xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod6xOPQRSTUV));
+
+ const __m256i vi7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i7));
+ const __m256i vk7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 224 * sizeof(int8_t))));
+ const __m256i vi7xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i7 + 16)));
+ const __m256i vk7xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 240 * sizeof(int8_t))));
+ i7 += 32;
+
+ const __m256i vprod7x0123456789ABCDEF = _mm256_mullo_epi16(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF);
+ const __m128i vprod7x89ABCDEF = _mm256_extracti128_si256(vprod7x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod7x89ABCDEF));
+ const __m256i vprod7xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi7xGHIJKLMNOPQRSTUV, vk7xGHIJKLMNOPQRSTUV);
+ const __m128i vprod7xOPQRSTUV = _mm256_extracti128_si256(vprod7xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod7xOPQRSTUV));
+
+ const __m256i vi8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i8));
+ const __m256i vk8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 256 * sizeof(int8_t))));
+ const __m256i vi8xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i8 + 16)));
+ const __m256i vk8xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 272 * sizeof(int8_t))));
+ i8 += 32;
+
+ const __m256i vprod8x0123456789ABCDEF = _mm256_mullo_epi16(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF);
+ const __m128i vprod8x89ABCDEF = _mm256_extracti128_si256(vprod8x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
+ const __m256i vprod8xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi8xGHIJKLMNOPQRSTUV, vk8xGHIJKLMNOPQRSTUV);
+ const __m128i vprod8xOPQRSTUV = _mm256_extracti128_si256(vprod8xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod8xOPQRSTUV));
+
+ const __m256i vi9x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i9));
+ const __m256i vk9x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(int8_t))));
+ const __m256i vi9xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i9 + 16)));
+ const __m256i vk9xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 304 * sizeof(int8_t))));
+ i9 += 32;
+
+ const __m256i vprod9x0123456789ABCDEF = _mm256_mullo_epi16(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF);
+ const __m128i vprod9x89ABCDEF = _mm256_extracti128_si256(vprod9x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod9x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod9x89ABCDEF));
+ const __m256i vprod9xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi9xGHIJKLMNOPQRSTUV, vk9xGHIJKLMNOPQRSTUV);
+ const __m128i vprod9xOPQRSTUV = _mm256_extracti128_si256(vprod9xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod9xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod9xOPQRSTUV));
+
+ const __m256i vi10x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i10));
+ const __m256i vk10x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 320 * sizeof(int8_t))));
+ const __m256i vi10xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i10 + 16)));
+ const __m256i vk10xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 336 * sizeof(int8_t))));
+ i10 += 32;
+
+ const __m256i vprod10x0123456789ABCDEF = _mm256_mullo_epi16(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF);
+ const __m128i vprod10x89ABCDEF = _mm256_extracti128_si256(vprod10x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod10x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod10x89ABCDEF));
+ const __m256i vprod10xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi10xGHIJKLMNOPQRSTUV, vk10xGHIJKLMNOPQRSTUV);
+ const __m128i vprod10xOPQRSTUV = _mm256_extracti128_si256(vprod10xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod10xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod10xOPQRSTUV));
+
+ const __m256i vi11x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i11));
+ const __m256i vk11x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 352 * sizeof(int8_t))));
+ const __m256i vi11xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i11 + 16)));
+ const __m256i vk11xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 368 * sizeof(int8_t))));
+ i11 += 32;
+
+ const __m256i vprod11x0123456789ABCDEF = _mm256_mullo_epi16(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF);
+ const __m128i vprod11x89ABCDEF = _mm256_extracti128_si256(vprod11x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod11x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod11x89ABCDEF));
+ const __m256i vprod11xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi11xGHIJKLMNOPQRSTUV, vk11xGHIJKLMNOPQRSTUV);
+ const __m128i vprod11xOPQRSTUV = _mm256_extracti128_si256(vprod11xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod11xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod11xOPQRSTUV));
+
+ const __m256i vi12x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i12));
+ const __m256i vk12x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 384 * sizeof(int8_t))));
+ const __m256i vi12xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i12 + 16)));
+ const __m256i vk12xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 400 * sizeof(int8_t))));
+ i12 += 32;
+
+ const __m256i vprod12x0123456789ABCDEF = _mm256_mullo_epi16(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF);
+ const __m128i vprod12x89ABCDEF = _mm256_extracti128_si256(vprod12x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod12x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod12x89ABCDEF));
+ const __m256i vprod12xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi12xGHIJKLMNOPQRSTUV, vk12xGHIJKLMNOPQRSTUV);
+ const __m128i vprod12xOPQRSTUV = _mm256_extracti128_si256(vprod12xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod12xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod12xOPQRSTUV));
+
+ const __m256i vi13x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i13));
+ const __m256i vk13x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 416 * sizeof(int8_t))));
+ const __m256i vi13xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i13 + 16)));
+ const __m256i vk13xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 432 * sizeof(int8_t))));
+ i13 += 32;
+
+ const __m256i vprod13x0123456789ABCDEF = _mm256_mullo_epi16(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF);
+ const __m128i vprod13x89ABCDEF = _mm256_extracti128_si256(vprod13x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod13x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod13x89ABCDEF));
+ const __m256i vprod13xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi13xGHIJKLMNOPQRSTUV, vk13xGHIJKLMNOPQRSTUV);
+ const __m128i vprod13xOPQRSTUV = _mm256_extracti128_si256(vprod13xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod13xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod13xOPQRSTUV));
+
+ const __m256i vi14x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i14));
+ const __m256i vk14x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 448 * sizeof(int8_t))));
+ const __m256i vi14xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i14 + 16)));
+ const __m256i vk14xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 464 * sizeof(int8_t))));
+ i14 += 32;
+
+ const __m256i vprod14x0123456789ABCDEF = _mm256_mullo_epi16(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF);
+ const __m128i vprod14x89ABCDEF = _mm256_extracti128_si256(vprod14x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod14x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod14x89ABCDEF));
+ const __m256i vprod14xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi14xGHIJKLMNOPQRSTUV, vk14xGHIJKLMNOPQRSTUV);
+ const __m128i vprod14xOPQRSTUV = _mm256_extracti128_si256(vprod14xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod14xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod14xOPQRSTUV));
+
+ const __m256i vi15x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i15));
+ const __m256i vk15x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 480 * sizeof(int8_t))));
+ const __m256i vi15xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i15 + 16)));
+ const __m256i vk15xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 496 * sizeof(int8_t))));
+ i15 += 32;
+
+ const __m256i vprod15x0123456789ABCDEF = _mm256_mullo_epi16(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF);
+ const __m128i vprod15x89ABCDEF = _mm256_extracti128_si256(vprod15x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod15x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod15x89ABCDEF));
+ const __m256i vprod15xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi15xGHIJKLMNOPQRSTUV, vk15xGHIJKLMNOPQRSTUV);
+ const __m128i vprod15xOPQRSTUV = _mm256_extracti128_si256(vprod15xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod15xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod15xOPQRSTUV));
+
+ const __m256i vi16x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i16));
+ const __m256i vk16x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 512 * sizeof(int8_t))));
+ const __m256i vi16xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i16 + 16)));
+ const __m256i vk16xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 528 * sizeof(int8_t))));
+ i16 += 32;
+
+ const __m256i vprod16x0123456789ABCDEF = _mm256_mullo_epi16(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF);
+ const __m128i vprod16x89ABCDEF = _mm256_extracti128_si256(vprod16x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod16x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod16x89ABCDEF));
+ const __m256i vprod16xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi16xGHIJKLMNOPQRSTUV, vk16xGHIJKLMNOPQRSTUV);
+ const __m128i vprod16xOPQRSTUV = _mm256_extracti128_si256(vprod16xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod16xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod16xOPQRSTUV));
+
+ const __m256i vi17x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i17));
+ const __m256i vk17x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 544 * sizeof(int8_t))));
+ const __m256i vi17xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i17 + 16)));
+ const __m256i vk17xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 560 * sizeof(int8_t))));
+ i17 += 32;
+
+ const __m256i vprod17x0123456789ABCDEF = _mm256_mullo_epi16(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF);
+ const __m128i vprod17x89ABCDEF = _mm256_extracti128_si256(vprod17x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod17x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod17x89ABCDEF));
+ const __m256i vprod17xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi17xGHIJKLMNOPQRSTUV, vk17xGHIJKLMNOPQRSTUV);
+ const __m128i vprod17xOPQRSTUV = _mm256_extracti128_si256(vprod17xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod17xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod17xOPQRSTUV));
+
+ const __m256i vi18x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i18));
+ const __m256i vk18x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 576 * sizeof(int8_t))));
+ const __m256i vi18xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i18 + 16)));
+ const __m256i vk18xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 592 * sizeof(int8_t))));
+ i18 += 32;
+
+ const __m256i vprod18x0123456789ABCDEF = _mm256_mullo_epi16(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF);
+ const __m128i vprod18x89ABCDEF = _mm256_extracti128_si256(vprod18x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod18x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod18x89ABCDEF));
+ const __m256i vprod18xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi18xGHIJKLMNOPQRSTUV, vk18xGHIJKLMNOPQRSTUV);
+ const __m128i vprod18xOPQRSTUV = _mm256_extracti128_si256(vprod18xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod18xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod18xOPQRSTUV));
+
+ const __m256i vi19x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i19));
+ const __m256i vk19x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 608 * sizeof(int8_t))));
+ const __m256i vi19xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i19 + 16)));
+ const __m256i vk19xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 624 * sizeof(int8_t))));
+ i19 += 32;
+
+ const __m256i vprod19x0123456789ABCDEF = _mm256_mullo_epi16(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF);
+ const __m128i vprod19x89ABCDEF = _mm256_extracti128_si256(vprod19x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod19x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod19x89ABCDEF));
+ const __m256i vprod19xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi19xGHIJKLMNOPQRSTUV, vk19xGHIJKLMNOPQRSTUV);
+ const __m128i vprod19xOPQRSTUV = _mm256_extracti128_si256(vprod19xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod19xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod19xOPQRSTUV));
+
+ const __m256i vi20x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i20));
+ const __m256i vk20x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 640 * sizeof(int8_t))));
+ const __m256i vi20xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i20 + 16)));
+ const __m256i vk20xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 656 * sizeof(int8_t))));
+ i20 += 32;
+
+ const __m256i vprod20x0123456789ABCDEF = _mm256_mullo_epi16(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF);
+ const __m128i vprod20x89ABCDEF = _mm256_extracti128_si256(vprod20x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod20x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod20x89ABCDEF));
+ const __m256i vprod20xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi20xGHIJKLMNOPQRSTUV, vk20xGHIJKLMNOPQRSTUV);
+ const __m128i vprod20xOPQRSTUV = _mm256_extracti128_si256(vprod20xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod20xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod20xOPQRSTUV));
+
+ const __m256i vi21x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i21));
+ const __m256i vk21x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 672 * sizeof(int8_t))));
+ const __m256i vi21xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i21 + 16)));
+ const __m256i vk21xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 688 * sizeof(int8_t))));
+ i21 += 32;
+
+ const __m256i vprod21x0123456789ABCDEF = _mm256_mullo_epi16(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF);
+ const __m128i vprod21x89ABCDEF = _mm256_extracti128_si256(vprod21x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod21x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod21x89ABCDEF));
+ const __m256i vprod21xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi21xGHIJKLMNOPQRSTUV, vk21xGHIJKLMNOPQRSTUV);
+ const __m128i vprod21xOPQRSTUV = _mm256_extracti128_si256(vprod21xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod21xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod21xOPQRSTUV));
+
+ const __m256i vi22x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i22));
+ const __m256i vk22x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 704 * sizeof(int8_t))));
+ const __m256i vi22xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i22 + 16)));
+ const __m256i vk22xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 720 * sizeof(int8_t))));
+ i22 += 32;
+
+ const __m256i vprod22x0123456789ABCDEF = _mm256_mullo_epi16(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF);
+ const __m128i vprod22x89ABCDEF = _mm256_extracti128_si256(vprod22x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod22x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod22x89ABCDEF));
+ const __m256i vprod22xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi22xGHIJKLMNOPQRSTUV, vk22xGHIJKLMNOPQRSTUV);
+ const __m128i vprod22xOPQRSTUV = _mm256_extracti128_si256(vprod22xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod22xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod22xOPQRSTUV));
+
+ const __m256i vi23x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i23));
+ const __m256i vk23x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 736 * sizeof(int8_t))));
+ const __m256i vi23xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i23 + 16)));
+ const __m256i vk23xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 752 * sizeof(int8_t))));
+ i23 += 32;
+
+ const __m256i vprod23x0123456789ABCDEF = _mm256_mullo_epi16(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF);
+ const __m128i vprod23x89ABCDEF = _mm256_extracti128_si256(vprod23x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod23x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod23x89ABCDEF));
+ const __m256i vprod23xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi23xGHIJKLMNOPQRSTUV, vk23xGHIJKLMNOPQRSTUV);
+ const __m128i vprod23xOPQRSTUV = _mm256_extracti128_si256(vprod23xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod23xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod23xOPQRSTUV));
+
+ const __m256i vi24x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i24));
+ const __m256i vk24x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 768 * sizeof(int8_t))));
+ const __m256i vi24xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i24 + 16)));
+ const __m256i vk24xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 784 * sizeof(int8_t))));
+ i24 += 32;
+
+ const __m256i vprod24x0123456789ABCDEF = _mm256_mullo_epi16(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF);
+ const __m128i vprod24x89ABCDEF = _mm256_extracti128_si256(vprod24x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod24x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod24x89ABCDEF));
+ const __m256i vprod24xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi24xGHIJKLMNOPQRSTUV, vk24xGHIJKLMNOPQRSTUV);
+ const __m128i vprod24xOPQRSTUV = _mm256_extracti128_si256(vprod24xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod24xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod24xOPQRSTUV));
+
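+    // Advance w past this group's 32 int32 biases and 25x32 int8 kernel taps;
+    // judging by the offsets below, the per-channel fp32 requantization scales
+    // are packed immediately after the taps and are loaded next.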
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(int32_t) + 800 * sizeof(int8_t));
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+ __m256 vscaledGHIJKLMN = _mm256_cvtepi32_ps(vaccGHIJKLMN);
+ __m256 vscaledOPQRSTUV = _mm256_cvtepi32_ps(vaccOPQRSTUV);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ const __m256 vscaleGHIJKLMN = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(float)));
+ const __m256 vscaleOPQRSTUV = _mm256_loadu_ps((const float*) ((uintptr_t) w + 24 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+ vscaledGHIJKLMN = _mm256_mul_ps(vscaledGHIJKLMN, vscaleGHIJKLMN);
+ vscaledOPQRSTUV = _mm256_mul_ps(vscaledOPQRSTUV, vscaleOPQRSTUV);
+
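+    // _mm256_cvtps_epi32 rounds with the current MXCSR mode (round-to-nearest-
+    // even by default), which is the rounding behavior this fp32 requantization
+    // scheme relies on.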
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+ vaccGHIJKLMN = _mm256_cvtps_epi32(vscaledGHIJKLMN);
+ vaccOPQRSTUV = _mm256_cvtps_epi32(vscaledOPQRSTUV);
+
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ const __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ const __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(vaccGHIJKLMN, vaccOPQRSTUV), voutput_zero_point);
+
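+    // _mm256_packs_epi32 packs within each 128-bit lane, so the int16 results
+    // come out in 0123 89AB 4567 CDEF order (hence the variable names). After
+    // packing to int8, the _MM_SHUFFLE(3, 1, 2, 0) dword shuffle restores the
+    // natural channel order.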
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV), _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+ voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
+ voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+ output += 32;
+ }
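+    // Remainder path: up to 31 trailing channels, handled 16 at a time. The
+    // weights stay packed with the full 32-channel tile stride, so kernel taps
+    // are read from k at a fixed 32-byte stride per tap.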
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 32 * sizeof(int32_t));
+ do {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+
+ const __m256i vi0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i0));
+ const __m256i vk0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) k));
+ i0 += 16;
+
+ const __m256i vprod0x0123456789ABCDEF = _mm256_mullo_epi16(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF);
+ const __m128i vprod0x89ABCDEF = _mm256_extracti128_si256(vprod0x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod0x89ABCDEF));
+
+ const __m256i vi1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i1));
+ const __m256i vk1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 32)));
+ i1 += 16;
+
+ const __m256i vprod1x0123456789ABCDEF = _mm256_mullo_epi16(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
+ const __m128i vprod1x89ABCDEF = _mm256_extracti128_si256(vprod1x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod1x89ABCDEF));
+
+ const __m256i vi2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i2));
+ const __m256i vk2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 64)));
+ i2 += 16;
+
+ const __m256i vprod2x0123456789ABCDEF = _mm256_mullo_epi16(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF);
+ const __m128i vprod2x89ABCDEF = _mm256_extracti128_si256(vprod2x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod2x89ABCDEF));
+
+ const __m256i vi3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i3));
+ const __m256i vk3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 96)));
+ i3 += 16;
+
+ const __m256i vprod3x0123456789ABCDEF = _mm256_mullo_epi16(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF);
+ const __m128i vprod3x89ABCDEF = _mm256_extracti128_si256(vprod3x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod3x89ABCDEF));
+
+ const __m256i vi4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i4));
+ const __m256i vk4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 128)));
+ i4 += 16;
+
+ const __m256i vprod4x0123456789ABCDEF = _mm256_mullo_epi16(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF);
+ const __m128i vprod4x89ABCDEF = _mm256_extracti128_si256(vprod4x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod4x89ABCDEF));
+
+ const __m256i vi5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i5));
+ const __m256i vk5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 160)));
+ i5 += 16;
+
+ const __m256i vprod5x0123456789ABCDEF = _mm256_mullo_epi16(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF);
+ const __m128i vprod5x89ABCDEF = _mm256_extracti128_si256(vprod5x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod5x89ABCDEF));
+
+ const __m256i vi6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i6));
+ const __m256i vk6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 192)));
+ i6 += 16;
+
+ const __m256i vprod6x0123456789ABCDEF = _mm256_mullo_epi16(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF);
+ const __m128i vprod6x89ABCDEF = _mm256_extracti128_si256(vprod6x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod6x89ABCDEF));
+
+ const __m256i vi7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i7));
+ const __m256i vk7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 224)));
+ i7 += 16;
+
+ const __m256i vprod7x0123456789ABCDEF = _mm256_mullo_epi16(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF);
+ const __m128i vprod7x89ABCDEF = _mm256_extracti128_si256(vprod7x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod7x89ABCDEF));
+
+ const __m256i vi8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i8));
+ const __m256i vk8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 256)));
+ i8 += 16;
+
+ const __m256i vprod8x0123456789ABCDEF = _mm256_mullo_epi16(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF);
+ const __m128i vprod8x89ABCDEF = _mm256_extracti128_si256(vprod8x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
+
+ const __m256i vi9x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i9));
+ const __m256i vk9x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 288)));
+ i9 += 16;
+
+ const __m256i vprod9x0123456789ABCDEF = _mm256_mullo_epi16(vi9x0123456789ABCDEF, vk9x0123456789ABCDEF);
+ const __m128i vprod9x89ABCDEF = _mm256_extracti128_si256(vprod9x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod9x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod9x89ABCDEF));
+
+ const __m256i vi10x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i10));
+ const __m256i vk10x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 320)));
+ i10 += 16;
+
+ const __m256i vprod10x0123456789ABCDEF = _mm256_mullo_epi16(vi10x0123456789ABCDEF, vk10x0123456789ABCDEF);
+ const __m128i vprod10x89ABCDEF = _mm256_extracti128_si256(vprod10x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod10x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod10x89ABCDEF));
+
+ const __m256i vi11x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i11));
+ const __m256i vk11x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 352)));
+ i11 += 16;
+
+ const __m256i vprod11x0123456789ABCDEF = _mm256_mullo_epi16(vi11x0123456789ABCDEF, vk11x0123456789ABCDEF);
+ const __m128i vprod11x89ABCDEF = _mm256_extracti128_si256(vprod11x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod11x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod11x89ABCDEF));
+
+ const __m256i vi12x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i12));
+ const __m256i vk12x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 384)));
+ i12 += 16;
+
+ const __m256i vprod12x0123456789ABCDEF = _mm256_mullo_epi16(vi12x0123456789ABCDEF, vk12x0123456789ABCDEF);
+ const __m128i vprod12x89ABCDEF = _mm256_extracti128_si256(vprod12x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod12x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod12x89ABCDEF));
+
+ const __m256i vi13x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i13));
+ const __m256i vk13x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 416)));
+ i13 += 16;
+
+ const __m256i vprod13x0123456789ABCDEF = _mm256_mullo_epi16(vi13x0123456789ABCDEF, vk13x0123456789ABCDEF);
+ const __m128i vprod13x89ABCDEF = _mm256_extracti128_si256(vprod13x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod13x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod13x89ABCDEF));
+
+ const __m256i vi14x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i14));
+ const __m256i vk14x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 448)));
+ i14 += 16;
+
+ const __m256i vprod14x0123456789ABCDEF = _mm256_mullo_epi16(vi14x0123456789ABCDEF, vk14x0123456789ABCDEF);
+ const __m128i vprod14x89ABCDEF = _mm256_extracti128_si256(vprod14x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod14x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod14x89ABCDEF));
+
+ const __m256i vi15x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i15));
+ const __m256i vk15x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 480)));
+ i15 += 16;
+
+ const __m256i vprod15x0123456789ABCDEF = _mm256_mullo_epi16(vi15x0123456789ABCDEF, vk15x0123456789ABCDEF);
+ const __m128i vprod15x89ABCDEF = _mm256_extracti128_si256(vprod15x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod15x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod15x89ABCDEF));
+
+ const __m256i vi16x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i16));
+ const __m256i vk16x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 512)));
+ i16 += 16;
+
+ const __m256i vprod16x0123456789ABCDEF = _mm256_mullo_epi16(vi16x0123456789ABCDEF, vk16x0123456789ABCDEF);
+ const __m128i vprod16x89ABCDEF = _mm256_extracti128_si256(vprod16x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod16x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod16x89ABCDEF));
+
+ const __m256i vi17x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i17));
+ const __m256i vk17x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 544)));
+ i17 += 16;
+
+ const __m256i vprod17x0123456789ABCDEF = _mm256_mullo_epi16(vi17x0123456789ABCDEF, vk17x0123456789ABCDEF);
+ const __m128i vprod17x89ABCDEF = _mm256_extracti128_si256(vprod17x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod17x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod17x89ABCDEF));
+
+ const __m256i vi18x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i18));
+ const __m256i vk18x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 576)));
+ i18 += 16;
+
+ const __m256i vprod18x0123456789ABCDEF = _mm256_mullo_epi16(vi18x0123456789ABCDEF, vk18x0123456789ABCDEF);
+ const __m128i vprod18x89ABCDEF = _mm256_extracti128_si256(vprod18x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod18x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod18x89ABCDEF));
+
+ const __m256i vi19x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i19));
+ const __m256i vk19x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 608)));
+ i19 += 16;
+
+ const __m256i vprod19x0123456789ABCDEF = _mm256_mullo_epi16(vi19x0123456789ABCDEF, vk19x0123456789ABCDEF);
+ const __m128i vprod19x89ABCDEF = _mm256_extracti128_si256(vprod19x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod19x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod19x89ABCDEF));
+
+ const __m256i vi20x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i20));
+ const __m256i vk20x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 640)));
+ i20 += 16;
+
+ const __m256i vprod20x0123456789ABCDEF = _mm256_mullo_epi16(vi20x0123456789ABCDEF, vk20x0123456789ABCDEF);
+ const __m128i vprod20x89ABCDEF = _mm256_extracti128_si256(vprod20x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod20x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod20x89ABCDEF));
+
+ const __m256i vi21x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i21));
+ const __m256i vk21x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 672)));
+ i21 += 16;
+
+ const __m256i vprod21x0123456789ABCDEF = _mm256_mullo_epi16(vi21x0123456789ABCDEF, vk21x0123456789ABCDEF);
+ const __m128i vprod21x89ABCDEF = _mm256_extracti128_si256(vprod21x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod21x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod21x89ABCDEF));
+
+ const __m256i vi22x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i22));
+ const __m256i vk22x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 704)));
+ i22 += 16;
+
+ const __m256i vprod22x0123456789ABCDEF = _mm256_mullo_epi16(vi22x0123456789ABCDEF, vk22x0123456789ABCDEF);
+ const __m128i vprod22x89ABCDEF = _mm256_extracti128_si256(vprod22x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod22x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod22x89ABCDEF));
+
+ const __m256i vi23x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i23));
+ const __m256i vk23x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 736)));
+ i23 += 16;
+
+ const __m256i vprod23x0123456789ABCDEF = _mm256_mullo_epi16(vi23x0123456789ABCDEF, vk23x0123456789ABCDEF);
+ const __m128i vprod23x89ABCDEF = _mm256_extracti128_si256(vprod23x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod23x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod23x89ABCDEF));
+
+ const __m256i vi24x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i24));
+ const __m256i vk24x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 768)));
+ i24 += 16;
+
+ const __m256i vprod24x0123456789ABCDEF = _mm256_mullo_epi16(vi24x0123456789ABCDEF, vk24x0123456789ABCDEF);
+ const __m128i vprod24x89ABCDEF = _mm256_extracti128_si256(vprod24x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod24x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod24x89ABCDEF));
+
+ k += 16;
+
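+      // The scale offset is relative to w, which still points at the bias
+      // words of this (partially used) 32-channel group; the packed layout
+      // appears to reserve the full tile, so the scales sit at the same
+      // tile-relative offset as in the main loop.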
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 32 * sizeof(int32_t) + 800 * sizeof(int8_t)));
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 32 * sizeof(int32_t) + 800 * sizeof(int8_t) + 8 * sizeof(float)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+ vout0123456789ABCDEF = _mm_min_epi8(_mm_max_epi8(vout0123456789ABCDEF, voutput_min), voutput_max);
+
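+      // Store the sub-16 tail by binary decomposition of c (8, 4, 2, then 1
+      // bytes), shifting the consumed lanes out of the vector between stores.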
+ if XNN_LIKELY(c >= 16) {
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ c -= 16;
+ } else {
+ if (c & 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456789ABCDEF);
+ vout0123456789ABCDEF = _mm_unpackhi_epi64(vout0123456789ABCDEF, vout0123456789ABCDEF);
+ output += 8;
+ }
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456789ABCDEF);
+ vout0123456789ABCDEF = _mm_srli_epi64(vout0123456789ABCDEF, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456789ABCDEF, 0);
+ vout0123456789ABCDEF = _mm_srli_epi32(vout0123456789ABCDEF, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456789ABCDEF, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c b/src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
new file mode 100644
index 0000000..63528b2
--- /dev/null
+++ b/src/qc8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
@@ -0,0 +1,784 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
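+// Note on the packed weight layout assumed by the offsets below: each group of
+// 32 channels is stored as 32 int32 biases, then 25x32 int8 kernel taps, then
+// 32 fp32 per-channel scales (the QC8 addition relative to QS8).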
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
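+    // One input pointer per kernel tap (25 taps, e.g. a 5x5 window). Rows that
+    // the indirection buffer points at the shared `zero` buffer are padding and
+    // must not be shifted by input_offset.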
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ const int8_t* i9 = input[9];
+ assert(i9 != NULL);
+ if XNN_UNPREDICTABLE(i9 != zero) {
+ i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
+ }
+ const int8_t* i10 = input[10];
+ assert(i10 != NULL);
+ if XNN_UNPREDICTABLE(i10 != zero) {
+ i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
+ }
+ const int8_t* i11 = input[11];
+ assert(i11 != NULL);
+ if XNN_UNPREDICTABLE(i11 != zero) {
+ i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
+ }
+ const int8_t* i12 = input[12];
+ assert(i12 != NULL);
+ if XNN_UNPREDICTABLE(i12 != zero) {
+ i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
+ }
+ const int8_t* i13 = input[13];
+ assert(i13 != NULL);
+ if XNN_UNPREDICTABLE(i13 != zero) {
+ i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
+ }
+ const int8_t* i14 = input[14];
+ assert(i14 != NULL);
+ if XNN_UNPREDICTABLE(i14 != zero) {
+ i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
+ }
+ const int8_t* i15 = input[15];
+ assert(i15 != NULL);
+ if XNN_UNPREDICTABLE(i15 != zero) {
+ i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
+ }
+ const int8_t* i16 = input[16];
+ assert(i16 != NULL);
+ if XNN_UNPREDICTABLE(i16 != zero) {
+ i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
+ }
+ const int8_t* i17 = input[17];
+ assert(i17 != NULL);
+ if XNN_UNPREDICTABLE(i17 != zero) {
+ i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
+ }
+ const int8_t* i18 = input[18];
+ assert(i18 != NULL);
+ if XNN_UNPREDICTABLE(i18 != zero) {
+ i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
+ }
+ const int8_t* i19 = input[19];
+ assert(i19 != NULL);
+ if XNN_UNPREDICTABLE(i19 != zero) {
+ i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
+ }
+ const int8_t* i20 = input[20];
+ assert(i20 != NULL);
+ if XNN_UNPREDICTABLE(i20 != zero) {
+ i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
+ }
+ const int8_t* i21 = input[21];
+ assert(i21 != NULL);
+ if XNN_UNPREDICTABLE(i21 != zero) {
+ i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
+ }
+ const int8_t* i22 = input[22];
+ assert(i22 != NULL);
+ if XNN_UNPREDICTABLE(i22 != zero) {
+ i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
+ }
+ const int8_t* i23 = input[23];
+ assert(i23 != NULL);
+ if XNN_UNPREDICTABLE(i23 != zero) {
+ i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
+ }
+ const int8_t* i24 = input[24];
+ assert(i24 != NULL);
+ if XNN_UNPREDICTABLE(i24 != zero) {
+ i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 32; c -= 32) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m256i vaccGHIJKLMN = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+ __m256i vaccOPQRSTUV = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 24 * sizeof(int32_t)));
+
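+    // The mul32 variant sign-extends 8 int8 values straight to int32
+    // (_mm256_cvtepi8_epi32 on a 64-bit load) and accumulates with
+    // _mm256_mullo_epi32, trading the mul16 variant's 16-bit multiply plus
+    // widening adds for more, wider multiplies.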
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ const __m256i vi0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
+ const __m256i vk0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+ const __m256i vi0xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
+ const __m256i vk0xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ const __m256i vi0xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 24)));
+ const __m256i vk0xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+ i0 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi0x89ABCDEF, vk0x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi0xGHIJKLMN, vk0xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi0xOPQRSTUV, vk0xOPQRSTUV));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ const __m256i vi1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
+ const __m256i vk1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+ const __m256i vi1xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
+ const __m256i vk1xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ const __m256i vi1xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 24)));
+ const __m256i vk1xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+ i1 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi1x89ABCDEF, vk1x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi1xGHIJKLMN, vk1xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi1xOPQRSTUV, vk1xOPQRSTUV));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ const __m256i vi2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
+ const __m256i vk2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 72 * sizeof(int8_t))));
+ const __m256i vi2xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
+ const __m256i vk2xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ const __m256i vi2xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 24)));
+ const __m256i vk2xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 88 * sizeof(int8_t))));
+ i2 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi2x89ABCDEF, vk2x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi2xGHIJKLMN, vk2xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi2xOPQRSTUV, vk2xOPQRSTUV));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ const __m256i vi3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
+ const __m256i vk3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 104 * sizeof(int8_t))));
+ const __m256i vi3xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
+ const __m256i vk3xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ const __m256i vi3xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 24)));
+ const __m256i vk3xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 120 * sizeof(int8_t))));
+ i3 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi3x89ABCDEF, vk3x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi3xGHIJKLMN, vk3xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi3xOPQRSTUV, vk3xOPQRSTUV));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
+ const __m256i vk4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 136 * sizeof(int8_t))));
+ const __m256i vi4xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
+ const __m256i vk4xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ const __m256i vi4xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 24)));
+ const __m256i vk4xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 152 * sizeof(int8_t))));
+ i4 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi4xGHIJKLMN, vk4xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi4xOPQRSTUV, vk4xOPQRSTUV));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ const __m256i vi5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
+ const __m256i vk5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 168 * sizeof(int8_t))));
+ const __m256i vi5xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
+ const __m256i vk5xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ const __m256i vi5xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 24)));
+ const __m256i vk5xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 184 * sizeof(int8_t))));
+ i5 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi5x89ABCDEF, vk5x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi5xGHIJKLMN, vk5xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi5xOPQRSTUV, vk5xOPQRSTUV));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ const __m256i vi6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
+ const __m256i vk6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 200 * sizeof(int8_t))));
+ const __m256i vi6xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
+ const __m256i vk6xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+ const __m256i vi6xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 24)));
+ const __m256i vk6xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 216 * sizeof(int8_t))));
+ i6 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi6x89ABCDEF, vk6x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi6xGHIJKLMN, vk6xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi6xOPQRSTUV, vk6xOPQRSTUV));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 224 * sizeof(int8_t))));
+ const __m256i vi7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 8)));
+ const __m256i vk7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 232 * sizeof(int8_t))));
+ const __m256i vi7xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 16)));
+ const __m256i vk7xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 240 * sizeof(int8_t))));
+ const __m256i vi7xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 24)));
+ const __m256i vk7xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 248 * sizeof(int8_t))));
+ i7 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi7x89ABCDEF, vk7x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi7xGHIJKLMN, vk7xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi7xOPQRSTUV, vk7xOPQRSTUV));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 256 * sizeof(int8_t))));
+ const __m256i vi8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 8)));
+ const __m256i vk8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 264 * sizeof(int8_t))));
+ const __m256i vi8xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 16)));
+ const __m256i vk8xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 272 * sizeof(int8_t))));
+ const __m256i vi8xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 24)));
+ const __m256i vk8xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 280 * sizeof(int8_t))));
+ i8 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi8x89ABCDEF, vk8x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi8xGHIJKLMN, vk8xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi8xOPQRSTUV, vk8xOPQRSTUV));
+
+ const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
+ const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(int8_t))));
+ const __m256i vi9x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i9 + 8)));
+ const __m256i vk9x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 296 * sizeof(int8_t))));
+ const __m256i vi9xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i9 + 16)));
+ const __m256i vk9xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 304 * sizeof(int8_t))));
+ const __m256i vi9xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i9 + 24)));
+ const __m256i vk9xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 312 * sizeof(int8_t))));
+ i9 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi9x89ABCDEF, vk9x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi9xGHIJKLMN, vk9xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi9xOPQRSTUV, vk9xOPQRSTUV));
+
+ const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
+ const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 320 * sizeof(int8_t))));
+ const __m256i vi10x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 8)));
+ const __m256i vk10x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 328 * sizeof(int8_t))));
+ const __m256i vi10xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 16)));
+ const __m256i vk10xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 336 * sizeof(int8_t))));
+ const __m256i vi10xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i10 + 24)));
+ const __m256i vk10xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 344 * sizeof(int8_t))));
+ i10 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi10x89ABCDEF, vk10x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi10xGHIJKLMN, vk10xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi10xOPQRSTUV, vk10xOPQRSTUV));
+
+ const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
+ const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 352 * sizeof(int8_t))));
+ const __m256i vi11x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i11 + 8)));
+ const __m256i vk11x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 360 * sizeof(int8_t))));
+ const __m256i vi11xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i11 + 16)));
+ const __m256i vk11xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 368 * sizeof(int8_t))));
+ const __m256i vi11xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i11 + 24)));
+ const __m256i vk11xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 376 * sizeof(int8_t))));
+ i11 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi11x89ABCDEF, vk11x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi11xGHIJKLMN, vk11xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi11xOPQRSTUV, vk11xOPQRSTUV));
+
+ const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
+ const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 384 * sizeof(int8_t))));
+ const __m256i vi12x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i12 + 8)));
+ const __m256i vk12x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 392 * sizeof(int8_t))));
+ const __m256i vi12xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i12 + 16)));
+ const __m256i vk12xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 400 * sizeof(int8_t))));
+ const __m256i vi12xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i12 + 24)));
+ const __m256i vk12xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 408 * sizeof(int8_t))));
+ i12 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi12x89ABCDEF, vk12x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi12xGHIJKLMN, vk12xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi12xOPQRSTUV, vk12xOPQRSTUV));
+
+ const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
+ const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 416 * sizeof(int8_t))));
+ const __m256i vi13x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i13 + 8)));
+ const __m256i vk13x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 424 * sizeof(int8_t))));
+ const __m256i vi13xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i13 + 16)));
+ const __m256i vk13xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 432 * sizeof(int8_t))));
+ const __m256i vi13xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i13 + 24)));
+ const __m256i vk13xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 440 * sizeof(int8_t))));
+ i13 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi13x89ABCDEF, vk13x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi13xGHIJKLMN, vk13xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi13xOPQRSTUV, vk13xOPQRSTUV));
+
+ const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
+ const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 448 * sizeof(int8_t))));
+ const __m256i vi14x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i14 + 8)));
+ const __m256i vk14x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 456 * sizeof(int8_t))));
+ const __m256i vi14xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i14 + 16)));
+ const __m256i vk14xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 464 * sizeof(int8_t))));
+ const __m256i vi14xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i14 + 24)));
+ const __m256i vk14xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 472 * sizeof(int8_t))));
+ i14 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi14x89ABCDEF, vk14x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi14xGHIJKLMN, vk14xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi14xOPQRSTUV, vk14xOPQRSTUV));
+
+ const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
+ const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 480 * sizeof(int8_t))));
+ const __m256i vi15x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i15 + 8)));
+ const __m256i vk15x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 488 * sizeof(int8_t))));
+ const __m256i vi15xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i15 + 16)));
+ const __m256i vk15xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 496 * sizeof(int8_t))));
+ const __m256i vi15xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i15 + 24)));
+ const __m256i vk15xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 504 * sizeof(int8_t))));
+ i15 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi15x89ABCDEF, vk15x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi15xGHIJKLMN, vk15xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi15xOPQRSTUV, vk15xOPQRSTUV));
+
+ const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
+ const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 512 * sizeof(int8_t))));
+ const __m256i vi16x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i16 + 8)));
+ const __m256i vk16x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 520 * sizeof(int8_t))));
+ const __m256i vi16xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i16 + 16)));
+ const __m256i vk16xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 528 * sizeof(int8_t))));
+ const __m256i vi16xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i16 + 24)));
+ const __m256i vk16xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 536 * sizeof(int8_t))));
+ i16 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi16x89ABCDEF, vk16x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi16xGHIJKLMN, vk16xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi16xOPQRSTUV, vk16xOPQRSTUV));
+
+ const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
+ const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 544 * sizeof(int8_t))));
+ const __m256i vi17x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i17 + 8)));
+ const __m256i vk17x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 552 * sizeof(int8_t))));
+ const __m256i vi17xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i17 + 16)));
+ const __m256i vk17xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 560 * sizeof(int8_t))));
+ const __m256i vi17xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i17 + 24)));
+ const __m256i vk17xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 568 * sizeof(int8_t))));
+ i17 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi17x89ABCDEF, vk17x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi17xGHIJKLMN, vk17xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi17xOPQRSTUV, vk17xOPQRSTUV));
+
+ const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
+ const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 576 * sizeof(int8_t))));
+ const __m256i vi18x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i18 + 8)));
+ const __m256i vk18x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 584 * sizeof(int8_t))));
+ const __m256i vi18xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i18 + 16)));
+ const __m256i vk18xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 592 * sizeof(int8_t))));
+ const __m256i vi18xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i18 + 24)));
+ const __m256i vk18xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 600 * sizeof(int8_t))));
+ i18 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi18x89ABCDEF, vk18x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi18xGHIJKLMN, vk18xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi18xOPQRSTUV, vk18xOPQRSTUV));
+
+ const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
+ const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 608 * sizeof(int8_t))));
+ const __m256i vi19x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i19 + 8)));
+ const __m256i vk19x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 616 * sizeof(int8_t))));
+ const __m256i vi19xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i19 + 16)));
+ const __m256i vk19xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 624 * sizeof(int8_t))));
+ const __m256i vi19xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i19 + 24)));
+ const __m256i vk19xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 632 * sizeof(int8_t))));
+ i19 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi19x89ABCDEF, vk19x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi19xGHIJKLMN, vk19xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi19xOPQRSTUV, vk19xOPQRSTUV));
+
+ const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
+ const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 640 * sizeof(int8_t))));
+ const __m256i vi20x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i20 + 8)));
+ const __m256i vk20x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 648 * sizeof(int8_t))));
+ const __m256i vi20xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i20 + 16)));
+ const __m256i vk20xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 656 * sizeof(int8_t))));
+ const __m256i vi20xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i20 + 24)));
+ const __m256i vk20xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 664 * sizeof(int8_t))));
+ i20 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi20x89ABCDEF, vk20x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi20xGHIJKLMN, vk20xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi20xOPQRSTUV, vk20xOPQRSTUV));
+
+ const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
+ const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 672 * sizeof(int8_t))));
+ const __m256i vi21x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i21 + 8)));
+ const __m256i vk21x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 680 * sizeof(int8_t))));
+ const __m256i vi21xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i21 + 16)));
+ const __m256i vk21xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 688 * sizeof(int8_t))));
+ const __m256i vi21xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i21 + 24)));
+ const __m256i vk21xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 696 * sizeof(int8_t))));
+ i21 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi21x89ABCDEF, vk21x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi21xGHIJKLMN, vk21xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi21xOPQRSTUV, vk21xOPQRSTUV));
+
+ const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
+ const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 704 * sizeof(int8_t))));
+ const __m256i vi22x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i22 + 8)));
+ const __m256i vk22x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 712 * sizeof(int8_t))));
+ const __m256i vi22xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i22 + 16)));
+ const __m256i vk22xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 720 * sizeof(int8_t))));
+ const __m256i vi22xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i22 + 24)));
+ const __m256i vk22xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 728 * sizeof(int8_t))));
+ i22 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi22x89ABCDEF, vk22x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi22xGHIJKLMN, vk22xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi22xOPQRSTUV, vk22xOPQRSTUV));
+
+ const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
+ const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 736 * sizeof(int8_t))));
+ const __m256i vi23x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i23 + 8)));
+ const __m256i vk23x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 744 * sizeof(int8_t))));
+ const __m256i vi23xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i23 + 16)));
+ const __m256i vk23xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 752 * sizeof(int8_t))));
+ const __m256i vi23xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i23 + 24)));
+ const __m256i vk23xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 760 * sizeof(int8_t))));
+ i23 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi23x89ABCDEF, vk23x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi23xGHIJKLMN, vk23xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi23xOPQRSTUV, vk23xOPQRSTUV));
+
+ const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
+ const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 768 * sizeof(int8_t))));
+ const __m256i vi24x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i24 + 8)));
+ const __m256i vk24x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 776 * sizeof(int8_t))));
+ const __m256i vi24xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i24 + 16)));
+ const __m256i vk24xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 784 * sizeof(int8_t))));
+ const __m256i vi24xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i24 + 24)));
+ const __m256i vk24xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 792 * sizeof(int8_t))));
+ i24 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi24x89ABCDEF, vk24x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi24xGHIJKLMN, vk24xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi24xOPQRSTUV, vk24xOPQRSTUV));
+
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(int32_t) + 800 * sizeof(int8_t));
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+ __m256 vscaledGHIJKLMN = _mm256_cvtepi32_ps(vaccGHIJKLMN);
+ __m256 vscaledOPQRSTUV = _mm256_cvtepi32_ps(vaccOPQRSTUV);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ const __m256 vscaleGHIJKLMN = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(float)));
+ const __m256 vscaleOPQRSTUV = _mm256_loadu_ps((const float*) ((uintptr_t) w + 24 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+ vscaledGHIJKLMN = _mm256_mul_ps(vscaledGHIJKLMN, vscaleGHIJKLMN);
+ vscaledOPQRSTUV = _mm256_mul_ps(vscaledOPQRSTUV, vscaleOPQRSTUV);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+ vaccGHIJKLMN = _mm256_cvtps_epi32(vscaledGHIJKLMN);
+ vaccOPQRSTUV = _mm256_cvtps_epi32(vscaledOPQRSTUV);
+
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(vaccGHIJKLMN, vaccOPQRSTUV), voutput_zero_point);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV), _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+ voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
+ voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+ output += 32;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 32 * sizeof(int32_t));
+ do {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) k));
+ i0 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 32)));
+ i1 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 64)));
+ i2 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 96)));
+ i3 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 128)));
+ i4 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 160)));
+ i5 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 192)));
+ i6 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 224)));
+ i7 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 256)));
+ i8 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
+ const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 288)));
+ i9 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));
+
+ const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
+ const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 320)));
+ i10 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));
+
+ const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
+ const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 352)));
+ i11 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));
+
+ const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
+ const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 384)));
+ i12 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));
+
+ const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
+ const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 416)));
+ i13 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));
+
+ const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
+ const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 448)));
+ i14 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));
+
+ const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
+ const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 480)));
+ i15 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));
+
+ const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
+ const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 512)));
+ i16 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));
+
+ const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
+ const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 544)));
+ i17 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));
+
+ const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
+ const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 576)));
+ i18 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));
+
+ const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
+ const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 608)));
+ i19 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));
+
+ const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
+ const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 640)));
+ i20 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));
+
+ const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
+ const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 672)));
+ i21 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));
+
+ const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
+ const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 704)));
+ i22 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));
+
+ const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
+ const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 736)));
+ i23 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));
+
+ const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
+ const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 768)));
+ i24 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
+
+ k += 8;
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 32 * sizeof(int32_t) + 800 * sizeof(int8_t)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
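Note on the weight layout consumed above: the load offsets in the up32x25 kernel imply that each 32-channel weight block is packed as 32 int32 biases, then 25 taps x 32 int8 kernel values (the 800-byte span in the `32 * sizeof(int32_t) + 800 * sizeof(int8_t)` expressions), then 32 per-channel float scales. A minimal sketch of that offset arithmetic follows; it is an illustration only, not part of the generated kernel, and the helper names (`qc8_kernel_offset`, `qc8_scale_offset`) are hypothetical:

    /*
     * Illustration only: byte offsets within one 32-channel weight block
     * of the up32x25 QC8 microkernel, as implied by the loads above.
     */
    #include <stddef.h>
    #include <stdint.h>

    #define CHANNEL_TILE 32  /* channels per block ("up32") */
    #define KERNEL_TAPS  25  /* taps per channel ("x25") */

    /* Offset of the int8 kernel value for tap t, channel lane c. */
    static size_t qc8_kernel_offset(size_t t, size_t c) {
      return CHANNEL_TILE * sizeof(int32_t)              /* 32 int32 biases */
           + (t * CHANNEL_TILE + c) * sizeof(int8_t);    /* packed taps */
    }

    /* Offset of the per-channel float scale for channel lane c. */
    static size_t qc8_scale_offset(size_t c) {
      return CHANNEL_TILE * sizeof(int32_t)              /* 32 int32 biases */
           + KERNEL_TAPS * CHANNEL_TILE * sizeof(int8_t) /* 800 kernel bytes */
           + c * sizeof(float);
    }

For example, `qc8_kernel_offset(24, 24)` evaluates to `32 * sizeof(int32_t) + 792`, matching the final `vk24xOPQRSTUV` load, and `qc8_scale_offset(0)` evaluates to `32 * sizeof(int32_t) + 800`, matching the scale load in the remainder path before `w` is advanced.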
diff --git a/src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c b/src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
new file mode 100644
index 0000000..b015b93
--- /dev/null
+++ b/src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
@@ -0,0 +1,407 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up32x9__avx2_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 32; c -= 32) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m256i vaccGHIJKLMN = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+ __m256i vaccOPQRSTUV = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 24 * sizeof(int32_t)));
+
+
+ const __m256i vi0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i0));
+ const __m256i vk0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ const __m256i vi0xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i0 + 16)));
+ const __m256i vk0xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ i0 += 32;
+
+ const __m256i vprod0x0123456789ABCDEF = _mm256_mullo_epi16(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF);
+ const __m128i vprod0x89ABCDEF = _mm256_extracti128_si256(vprod0x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod0x89ABCDEF));
+ const __m256i vprod0xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi0xGHIJKLMNOPQRSTUV, vk0xGHIJKLMNOPQRSTUV);
+ const __m128i vprod0xOPQRSTUV = _mm256_extracti128_si256(vprod0xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod0xOPQRSTUV));
+
+ const __m256i vi1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i1));
+ const __m256i vk1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ const __m256i vi1xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i1 + 16)));
+ const __m256i vk1xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ i1 += 32;
+
+ const __m256i vprod1x0123456789ABCDEF = _mm256_mullo_epi16(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
+ const __m128i vprod1x89ABCDEF = _mm256_extracti128_si256(vprod1x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod1x89ABCDEF));
+ const __m256i vprod1xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi1xGHIJKLMNOPQRSTUV, vk1xGHIJKLMNOPQRSTUV);
+ const __m128i vprod1xOPQRSTUV = _mm256_extracti128_si256(vprod1xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod1xOPQRSTUV));
+
+ const __m256i vi2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i2));
+ const __m256i vk2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ const __m256i vi2xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i2 + 16)));
+ const __m256i vk2xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ i2 += 32;
+
+ const __m256i vprod2x0123456789ABCDEF = _mm256_mullo_epi16(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF);
+ const __m128i vprod2x89ABCDEF = _mm256_extracti128_si256(vprod2x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod2x89ABCDEF));
+ const __m256i vprod2xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi2xGHIJKLMNOPQRSTUV, vk2xGHIJKLMNOPQRSTUV);
+ const __m128i vprod2xOPQRSTUV = _mm256_extracti128_si256(vprod2xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod2xOPQRSTUV));
+
+ const __m256i vi3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i3));
+ const __m256i vk3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ const __m256i vi3xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i3 + 16)));
+ const __m256i vk3xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ i3 += 32;
+
+ const __m256i vprod3x0123456789ABCDEF = _mm256_mullo_epi16(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF);
+ const __m128i vprod3x89ABCDEF = _mm256_extracti128_si256(vprod3x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod3x89ABCDEF));
+ const __m256i vprod3xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi3xGHIJKLMNOPQRSTUV, vk3xGHIJKLMNOPQRSTUV);
+ const __m128i vprod3xOPQRSTUV = _mm256_extracti128_si256(vprod3xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod3xOPQRSTUV));
+
+ const __m256i vi4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i4));
+ const __m256i vk4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ const __m256i vi4xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i4 + 16)));
+ const __m256i vk4xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ i4 += 32;
+
+ const __m256i vprod4x0123456789ABCDEF = _mm256_mullo_epi16(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF);
+ const __m128i vprod4x89ABCDEF = _mm256_extracti128_si256(vprod4x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod4x89ABCDEF));
+ const __m256i vprod4xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi4xGHIJKLMNOPQRSTUV, vk4xGHIJKLMNOPQRSTUV);
+ const __m128i vprod4xOPQRSTUV = _mm256_extracti128_si256(vprod4xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod4xOPQRSTUV));
+
+ const __m256i vi5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i5));
+ const __m256i vk5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ const __m256i vi5xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i5 + 16)));
+ const __m256i vk5xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ i5 += 32;
+
+ const __m256i vprod5x0123456789ABCDEF = _mm256_mullo_epi16(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF);
+ const __m128i vprod5x89ABCDEF = _mm256_extracti128_si256(vprod5x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod5x89ABCDEF));
+ const __m256i vprod5xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi5xGHIJKLMNOPQRSTUV, vk5xGHIJKLMNOPQRSTUV);
+ const __m128i vprod5xOPQRSTUV = _mm256_extracti128_si256(vprod5xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod5xOPQRSTUV));
+
+ const __m256i vi6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i6));
+ const __m256i vk6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ const __m256i vi6xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i6 + 16)));
+ const __m256i vk6xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+ i6 += 32;
+
+ const __m256i vprod6x0123456789ABCDEF = _mm256_mullo_epi16(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF);
+ const __m128i vprod6x89ABCDEF = _mm256_extracti128_si256(vprod6x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod6x89ABCDEF));
+ const __m256i vprod6xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi6xGHIJKLMNOPQRSTUV, vk6xGHIJKLMNOPQRSTUV);
+ const __m128i vprod6xOPQRSTUV = _mm256_extracti128_si256(vprod6xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod6xOPQRSTUV));
+
+ const __m256i vi7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i7));
+ const __m256i vk7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 224 * sizeof(int8_t))));
+ const __m256i vi7xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i7 + 16)));
+ const __m256i vk7xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 240 * sizeof(int8_t))));
+ i7 += 32;
+
+ const __m256i vprod7x0123456789ABCDEF = _mm256_mullo_epi16(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF);
+ const __m128i vprod7x89ABCDEF = _mm256_extracti128_si256(vprod7x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod7x89ABCDEF));
+ const __m256i vprod7xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi7xGHIJKLMNOPQRSTUV, vk7xGHIJKLMNOPQRSTUV);
+ const __m128i vprod7xOPQRSTUV = _mm256_extracti128_si256(vprod7xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod7xOPQRSTUV));
+
+ const __m256i vi8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i8));
+ const __m256i vk8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 256 * sizeof(int8_t))));
+ const __m256i vi8xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (i8 + 16)));
+ const __m256i vk8xGHIJKLMNOPQRSTUV = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 272 * sizeof(int8_t))));
+ i8 += 32;
+
+ const __m256i vprod8x0123456789ABCDEF = _mm256_mullo_epi16(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF);
+ const __m128i vprod8x89ABCDEF = _mm256_extracti128_si256(vprod8x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
+ const __m256i vprod8xGHIJKLMNOPQRSTUV = _mm256_mullo_epi16(vi8xGHIJKLMNOPQRSTUV, vk8xGHIJKLMNOPQRSTUV);
+ const __m128i vprod8xOPQRSTUV = _mm256_extracti128_si256(vprod8xGHIJKLMNOPQRSTUV, 1);
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8xGHIJKLMNOPQRSTUV)));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_cvtepi16_epi32(vprod8xOPQRSTUV));
+
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(int8_t));
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+ __m256 vscaledGHIJKLMN = _mm256_cvtepi32_ps(vaccGHIJKLMN);
+ __m256 vscaledOPQRSTUV = _mm256_cvtepi32_ps(vaccOPQRSTUV);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ const __m256 vscaleGHIJKLMN = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(float)));
+ const __m256 vscaleOPQRSTUV = _mm256_loadu_ps((const float*) ((uintptr_t) w + 24 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+ vscaledGHIJKLMN = _mm256_mul_ps(vscaledGHIJKLMN, vscaleGHIJKLMN);
+ vscaledOPQRSTUV = _mm256_mul_ps(vscaledOPQRSTUV, vscaleOPQRSTUV);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+ vaccGHIJKLMN = _mm256_cvtps_epi32(vscaledGHIJKLMN);
+ vaccOPQRSTUV = _mm256_cvtps_epi32(vscaledOPQRSTUV);
+
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ const __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ const __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(vaccGHIJKLMN, vaccOPQRSTUV), voutput_zero_point);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV), _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+ voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
+ voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+ output += 32;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 32 * sizeof(int32_t));
+ do {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+
+
+ const __m256i vi0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i0));
+ const __m256i vk0x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) k));
+ i0 += 16;
+
+ const __m256i vprod0x0123456789ABCDEF = _mm256_mullo_epi16(vi0x0123456789ABCDEF, vk0x0123456789ABCDEF);
+ const __m128i vprod0x89ABCDEF = _mm256_extracti128_si256(vprod0x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod0x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod0x89ABCDEF));
+
+ const __m256i vi1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i1));
+ const __m256i vk1x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 32)));
+ i1 += 16;
+
+ const __m256i vprod1x0123456789ABCDEF = _mm256_mullo_epi16(vi1x0123456789ABCDEF, vk1x0123456789ABCDEF);
+ const __m128i vprod1x89ABCDEF = _mm256_extracti128_si256(vprod1x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod1x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod1x89ABCDEF));
+
+ const __m256i vi2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i2));
+ const __m256i vk2x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 64)));
+ i2 += 16;
+
+ const __m256i vprod2x0123456789ABCDEF = _mm256_mullo_epi16(vi2x0123456789ABCDEF, vk2x0123456789ABCDEF);
+ const __m128i vprod2x89ABCDEF = _mm256_extracti128_si256(vprod2x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod2x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod2x89ABCDEF));
+
+ const __m256i vi3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i3));
+ const __m256i vk3x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 96)));
+ i3 += 16;
+
+ const __m256i vprod3x0123456789ABCDEF = _mm256_mullo_epi16(vi3x0123456789ABCDEF, vk3x0123456789ABCDEF);
+ const __m128i vprod3x89ABCDEF = _mm256_extracti128_si256(vprod3x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod3x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod3x89ABCDEF));
+
+ const __m256i vi4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i4));
+ const __m256i vk4x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 128)));
+ i4 += 16;
+
+ const __m256i vprod4x0123456789ABCDEF = _mm256_mullo_epi16(vi4x0123456789ABCDEF, vk4x0123456789ABCDEF);
+ const __m128i vprod4x89ABCDEF = _mm256_extracti128_si256(vprod4x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod4x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod4x89ABCDEF));
+
+ const __m256i vi5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i5));
+ const __m256i vk5x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 160)));
+ i5 += 16;
+
+ const __m256i vprod5x0123456789ABCDEF = _mm256_mullo_epi16(vi5x0123456789ABCDEF, vk5x0123456789ABCDEF);
+ const __m128i vprod5x89ABCDEF = _mm256_extracti128_si256(vprod5x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod5x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod5x89ABCDEF));
+
+ const __m256i vi6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i6));
+ const __m256i vk6x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 192)));
+ i6 += 16;
+
+ const __m256i vprod6x0123456789ABCDEF = _mm256_mullo_epi16(vi6x0123456789ABCDEF, vk6x0123456789ABCDEF);
+ const __m128i vprod6x89ABCDEF = _mm256_extracti128_si256(vprod6x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod6x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod6x89ABCDEF));
+
+ const __m256i vi7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i7));
+ const __m256i vk7x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 224)));
+ i7 += 16;
+
+ const __m256i vprod7x0123456789ABCDEF = _mm256_mullo_epi16(vi7x0123456789ABCDEF, vk7x0123456789ABCDEF);
+ const __m128i vprod7x89ABCDEF = _mm256_extracti128_si256(vprod7x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod7x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod7x89ABCDEF));
+
+ const __m256i vi8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) i8));
+ const __m256i vk8x0123456789ABCDEF = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (k + 256)));
+ i8 += 16;
+
+ const __m256i vprod8x0123456789ABCDEF = _mm256_mullo_epi16(vi8x0123456789ABCDEF, vk8x0123456789ABCDEF);
+ const __m128i vprod8x89ABCDEF = _mm256_extracti128_si256(vprod8x0123456789ABCDEF, 1);
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
+
+ k += 16;
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(int8_t)));
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(int8_t) + 8 * sizeof(float)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+ vout0123456789ABCDEF = _mm_min_epi8(_mm_max_epi8(vout0123456789ABCDEF, voutput_min), voutput_max);
+
+ if XNN_LIKELY(c >= 16) {
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ c -= 16;
+ } else {
+ if (c & 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456789ABCDEF);
+ vout0123456789ABCDEF = _mm_unpackhi_epi64(vout0123456789ABCDEF, vout0123456789ABCDEF);
+ output += 8;
+ }
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456789ABCDEF);
+ vout0123456789ABCDEF = _mm_srli_epi64(vout0123456789ABCDEF, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456789ABCDEF, 0);
+ vout0123456789ABCDEF = _mm_srli_epi32(vout0123456789ABCDEF, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456789ABCDEF, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
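Note on the requantization sequence shared by these fp32 variants: the accumulator is converted to float, multiplied by the per-channel scale, converted back to int32, biased by the output zero point with int16 saturation, narrowed to int8 with saturation, and clamped. A scalar model of one lane is sketched below, assuming the default round-to-nearest rounding mode (which both `_mm256_cvtps_epi32` and `lrintf` use under default settings); the function name `qc8_requantize_fp32` is hypothetical and for illustration only:

    /*
     * Illustration only: scalar model of the fp32 requantization that the
     * AVX2 intrinsics above vectorize.
     */
    #include <math.h>
    #include <stdint.h>

    static int8_t qc8_requantize_fp32(int32_t acc, float scale,
                                      int16_t output_zero_point,
                                      int8_t output_min, int8_t output_max) {
      const float scaled = (float) acc * scale;  /* cvtepi32_ps + mul_ps */
      long n = lrintf(scaled);                   /* cvtps_epi32 rounding */
      /* packs_epi32 saturates to int16 before the zero point is added */
      if (n > INT16_MAX) n = INT16_MAX;
      if (n < INT16_MIN) n = INT16_MIN;
      n += output_zero_point;                    /* adds_epi16 (saturating) */
      if (n > INT16_MAX) n = INT16_MAX;
      if (n < INT16_MIN) n = INT16_MIN;
      /* packs_epi16 saturates to int8 */
      if (n > INT8_MAX) n = INT8_MAX;
      if (n < INT8_MIN) n = INT8_MIN;
      /* final clamp (max_epi8 / min_epi8) */
      if (n < output_min) n = output_min;
      if (n > output_max) n = output_max;
      return (int8_t) n;
    }

This is the per-channel counterpart of the per-tensor QS8 fp32 path: only the scale varies by channel, which is why the kernels stream 8 scales per accumulator register from the packed weights instead of broadcasting a single scale from `params`.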
diff --git a/src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c b/src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
new file mode 100644
index 0000000..214eca0
--- /dev/null
+++ b/src/qc8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
@@ -0,0 +1,368 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up32x9__avx2_mul32(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 32; c -= 32) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+ __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m256i vaccGHIJKLMN = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+ __m256i vaccOPQRSTUV = _mm256_loadu_si256((const __m256i*) ((uintptr_t) w + 24 * sizeof(int32_t)));
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ const __m256i vi0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
+ const __m256i vk0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+ const __m256i vi0xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
+ const __m256i vk0xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ const __m256i vi0xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 24)));
+ const __m256i vk0xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+ i0 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi0x89ABCDEF, vk0x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi0xGHIJKLMN, vk0xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi0xOPQRSTUV, vk0xOPQRSTUV));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ const __m256i vi1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
+ const __m256i vk1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+ const __m256i vi1xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
+ const __m256i vk1xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ const __m256i vi1xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 24)));
+ const __m256i vk1xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+ i1 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi1x89ABCDEF, vk1x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi1xGHIJKLMN, vk1xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi1xOPQRSTUV, vk1xOPQRSTUV));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ const __m256i vi2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
+ const __m256i vk2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 72 * sizeof(int8_t))));
+ const __m256i vi2xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
+ const __m256i vk2xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ const __m256i vi2xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 24)));
+ const __m256i vk2xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 88 * sizeof(int8_t))));
+ i2 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi2x89ABCDEF, vk2x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi2xGHIJKLMN, vk2xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi2xOPQRSTUV, vk2xOPQRSTUV));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ const __m256i vi3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
+ const __m256i vk3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 104 * sizeof(int8_t))));
+ const __m256i vi3xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
+ const __m256i vk3xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ const __m256i vi3xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 24)));
+ const __m256i vk3xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 120 * sizeof(int8_t))));
+ i3 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi3x89ABCDEF, vk3x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi3xGHIJKLMN, vk3xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi3xOPQRSTUV, vk3xOPQRSTUV));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
+ const __m256i vk4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 136 * sizeof(int8_t))));
+ const __m256i vi4xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
+ const __m256i vk4xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ const __m256i vi4xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 24)));
+ const __m256i vk4xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 152 * sizeof(int8_t))));
+ i4 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi4xGHIJKLMN, vk4xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi4xOPQRSTUV, vk4xOPQRSTUV));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ const __m256i vi5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
+ const __m256i vk5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 168 * sizeof(int8_t))));
+ const __m256i vi5xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
+ const __m256i vk5xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ const __m256i vi5xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 24)));
+ const __m256i vk5xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 184 * sizeof(int8_t))));
+ i5 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi5x89ABCDEF, vk5x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi5xGHIJKLMN, vk5xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi5xOPQRSTUV, vk5xOPQRSTUV));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ const __m256i vi6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
+ const __m256i vk6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 200 * sizeof(int8_t))));
+ const __m256i vi6xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
+ const __m256i vk6xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 208 * sizeof(int8_t))));
+ const __m256i vi6xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 24)));
+ const __m256i vk6xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 216 * sizeof(int8_t))));
+ i6 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi6x89ABCDEF, vk6x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi6xGHIJKLMN, vk6xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi6xOPQRSTUV, vk6xOPQRSTUV));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 224 * sizeof(int8_t))));
+ const __m256i vi7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 8)));
+ const __m256i vk7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 232 * sizeof(int8_t))));
+ const __m256i vi7xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 16)));
+ const __m256i vk7xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 240 * sizeof(int8_t))));
+ const __m256i vi7xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 24)));
+ const __m256i vk7xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 248 * sizeof(int8_t))));
+ i7 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi7x89ABCDEF, vk7x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi7xGHIJKLMN, vk7xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi7xOPQRSTUV, vk7xOPQRSTUV));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 256 * sizeof(int8_t))));
+ const __m256i vi8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 8)));
+ const __m256i vk8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 264 * sizeof(int8_t))));
+ const __m256i vi8xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 16)));
+ const __m256i vk8xGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 272 * sizeof(int8_t))));
+ const __m256i vi8xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 24)));
+ const __m256i vk8xOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 32 * sizeof(int32_t) + 280 * sizeof(int8_t))));
+ i8 += 32;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+ vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi8x89ABCDEF, vk8x89ABCDEF));
+ vaccGHIJKLMN = _mm256_add_epi32(vaccGHIJKLMN, _mm256_mullo_epi32(vi8xGHIJKLMN, vk8xGHIJKLMN));
+ vaccOPQRSTUV = _mm256_add_epi32(vaccOPQRSTUV, _mm256_mullo_epi32(vi8xOPQRSTUV, vk8xOPQRSTUV));
+
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(int8_t));
+
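+ // Requantize: convert the int32 accumulators to float and apply the
+ // per-channel scales. In each packed QC8 weight block the 32 float scales
+ // sit right after the 32 int32 biases and the 9x32 int8 kernel taps, which
+ // is where w now points.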
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);
+ __m256 vscaledGHIJKLMN = _mm256_cvtepi32_ps(vaccGHIJKLMN);
+ __m256 vscaledOPQRSTUV = _mm256_cvtepi32_ps(vaccOPQRSTUV);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(float)));
+ const __m256 vscaleGHIJKLMN = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(float)));
+ const __m256 vscaleOPQRSTUV = _mm256_loadu_ps((const float*) ((uintptr_t) w + 24 * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + 32 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);
+ vscaledGHIJKLMN = _mm256_mul_ps(vscaledGHIJKLMN, vscaleGHIJKLMN);
+ vscaledOPQRSTUV = _mm256_mul_ps(vscaledOPQRSTUV, vscaleOPQRSTUV);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+ vaccGHIJKLMN = _mm256_cvtps_epi32(vscaledGHIJKLMN);
+ vaccOPQRSTUV = _mm256_cvtps_epi32(vscaledOPQRSTUV);
+
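+ // Narrow to int8 with saturating packs. AVX2 pack instructions operate
+ // within 128-bit lanes, so the intermediate vectors hold interleaved
+ // channel groups (as the vout012389AB... names record); the 32-bit shuffle
+ // restores ascending channel order before the store.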
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
+ __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);
+ __m256i voutGHIJOPQRKLMNSTUV = _mm256_adds_epi16(_mm256_packs_epi32(vaccGHIJKLMN, vaccOPQRSTUV), voutput_zero_point);
+
+ __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+ __m128i voutGHIJKLMNOPQRSTUV = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(voutGHIJOPQRKLMNSTUV), _mm256_extracti128_si256(voutGHIJOPQRKLMNSTUV, 1)), _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
+ vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
+ voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);
+ voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
+ output += 32;
+ }
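+ // Channel remainder: up to 31 channels are left and are handled 8 at a
+ // time. k keeps the 32-channel stride of the packed kernel taps while w
+ // walks the biases; the scales stay at a fixed offset from w.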
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 32 * sizeof(int32_t));
+ do {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) k));
+ i0 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 32)));
+ i1 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 64)));
+ i2 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 96)));
+ i3 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 128)));
+ i4 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 160)));
+ i5 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 192)));
+ i6 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 224)));
+ i7 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 256)));
+ i8 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ k += 8;
+
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 32 * sizeof(int32_t) + 288 * sizeof(int8_t)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+
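+ // A full group of 8 is stored directly; the final partial group is written
+ // in 4-, 2- and 1-byte pieces, shifting consumed lanes out of the vector.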
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qc8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c b/src/qc8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
new file mode 100644
index 0000000..06c7cd1
--- /dev/null
+++ b/src/qc8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
@@ -0,0 +1,504 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ const int8_t* i9 = input[9];
+ assert(i9 != NULL);
+ if XNN_UNPREDICTABLE(i9 != zero) {
+ i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
+ }
+ const int8_t* i10 = input[10];
+ assert(i10 != NULL);
+ if XNN_UNPREDICTABLE(i10 != zero) {
+ i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
+ }
+ const int8_t* i11 = input[11];
+ assert(i11 != NULL);
+ if XNN_UNPREDICTABLE(i11 != zero) {
+ i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
+ }
+ const int8_t* i12 = input[12];
+ assert(i12 != NULL);
+ if XNN_UNPREDICTABLE(i12 != zero) {
+ i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
+ }
+ const int8_t* i13 = input[13];
+ assert(i13 != NULL);
+ if XNN_UNPREDICTABLE(i13 != zero) {
+ i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
+ }
+ const int8_t* i14 = input[14];
+ assert(i14 != NULL);
+ if XNN_UNPREDICTABLE(i14 != zero) {
+ i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
+ }
+ const int8_t* i15 = input[15];
+ assert(i15 != NULL);
+ if XNN_UNPREDICTABLE(i15 != zero) {
+ i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
+ }
+ const int8_t* i16 = input[16];
+ assert(i16 != NULL);
+ if XNN_UNPREDICTABLE(i16 != zero) {
+ i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
+ }
+ const int8_t* i17 = input[17];
+ assert(i17 != NULL);
+ if XNN_UNPREDICTABLE(i17 != zero) {
+ i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
+ }
+ const int8_t* i18 = input[18];
+ assert(i18 != NULL);
+ if XNN_UNPREDICTABLE(i18 != zero) {
+ i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
+ }
+ const int8_t* i19 = input[19];
+ assert(i19 != NULL);
+ if XNN_UNPREDICTABLE(i19 != zero) {
+ i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
+ }
+ const int8_t* i20 = input[20];
+ assert(i20 != NULL);
+ if XNN_UNPREDICTABLE(i20 != zero) {
+ i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
+ }
+ const int8_t* i21 = input[21];
+ assert(i21 != NULL);
+ if XNN_UNPREDICTABLE(i21 != zero) {
+ i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
+ }
+ const int8_t* i22 = input[22];
+ assert(i22 != NULL);
+ if XNN_UNPREDICTABLE(i22 != zero) {
+ i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
+ }
+ const int8_t* i23 = input[23];
+ assert(i23 != NULL);
+ if XNN_UNPREDICTABLE(i23 != zero) {
+ i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
+ }
+ const int8_t* i24 = input[24];
+ assert(i24 != NULL);
+ if XNN_UNPREDICTABLE(i24 != zero) {
+ i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 8; c -= 8) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
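+ // 25-tap unipass accumulation: each tap sign-extends 8 input bytes and 8
+ // kernel bytes to int32 and multiply-accumulates with _mm256_mullo_epi32.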
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ i0 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+ i1 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ i2 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+ i3 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ i4 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+ i5 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ i6 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+ i7 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ i8 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
+ const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t))));
+ i9 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));
+
+ const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
+ const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+ i10 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));
+
+ const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
+ const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(int8_t))));
+ i11 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));
+
+ const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
+ const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+ i12 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));
+
+ const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
+ const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(int8_t))));
+ i13 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));
+
+ const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
+ const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+ i14 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));
+
+ const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
+ const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(int8_t))));
+ i15 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));
+
+ const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
+ const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+ i16 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));
+
+ const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
+ const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(int8_t))));
+ i17 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));
+
+ const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
+ const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+ i18 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));
+
+ const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
+ const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(int8_t))));
+ i19 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));
+
+ const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
+ const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+ i20 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));
+
+ const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
+ const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(int8_t))));
+ i21 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));
+
+ const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
+ const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+ i22 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));
+
+ const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
+ const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(int8_t))));
+ i23 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));
+
+ const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
+ const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+ i24 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t));
+
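+ // Per-channel requantization: w now points at the 8 float scales packed
+ // after this block's 8 int32 biases and 25x8 int8 kernel bytes.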
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ {
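+ // At most 7 channels remain for this 8-channel tile, so one non-looping
+ // tail iteration is enough.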
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
+ const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));
+
+ const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
+ const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));
+
+ const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
+ const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));
+
+ const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
+ const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));
+
+ const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
+ const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));
+
+ const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
+ const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));
+
+ const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
+ const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));
+
+ const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
+ const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));
+
+ const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
+ const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));
+
+ const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
+ const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));
+
+ const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
+ const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));
+
+ const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
+ const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));
+
+ const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
+ const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));
+
+ const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
+ const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));
+
+ const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
+ const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));
+
+ const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
+ const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
+
+
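+ // w was not advanced in the tail, so the scales are still at their fixed
+ // offset past the biases and kernel bytes.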
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ }
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qc8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c b/src/qc8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
new file mode 100644
index 0000000..4f1e137
--- /dev/null
+++ b/src/qc8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
@@ -0,0 +1,248 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x9__avx2_mul32(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const void* w = weights;
+ for (; c >= 8; c -= 8) {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
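+ // 9-tap unipass accumulation over a full group of 8 channels.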
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+ i0 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+ i1 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+ i2 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+ i3 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+ i4 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+ i5 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+ i6 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+ i7 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+ i8 += 8;
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));
+
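+ // Scale each lane by its channel's quantization scale; the 8 floats follow
+ // the 72 kernel bytes that w just skipped.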
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(float));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ {
+ __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
+
+
+ const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
+ const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
+
+ const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
+ const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
+
+ const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
+ const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
+
+ const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
+ const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
+
+ const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
+ const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
+
+ const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
+ const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
+
+ const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
+ const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
+
+ const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
+ const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
+
+ const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
+ const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t))));
+
+ vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
+
+
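+ // Tail: w still points at the block start, so the scales are loaded at
+ // their fixed offset (8 int32 biases + 72 int8 kernel bytes).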
+ __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
+ const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
+ vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);
+ vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
+ vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
+
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ }
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c b/src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
index e8938a0..d49c5fc 100644
--- a/src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
+++ b/src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul16.c
@@ -630,6 +630,7 @@
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c b/src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
index 4cf1156..8e035a0 100644
--- a/src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up16x25-minmax-fp32-avx2-mul32.c
@@ -570,13 +570,14 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
__m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
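+ // Advance past this group's biases only once requantization is done.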
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul16.c b/src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul16.c
index ffd862a..8b99630 100644
--- a/src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul16.c
+++ b/src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul16.c
@@ -674,6 +674,7 @@
vacc89ABCDEF =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul32.c b/src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul32.c
index 9db5041..9965288 100644
--- a/src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up16x25-minmax-gemmlowp-avx2-mul32.c
@@ -592,7 +592,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->gemmlowp_avx2.multiplier);
@@ -617,6 +616,8 @@
vacc01234567 =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c b/src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
index 9a002b0..31c67be 100644
--- a/src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
+++ b/src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul16.c
@@ -278,6 +278,7 @@
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c b/src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
index 9b34f93..f2aaa05 100644
--- a/src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up16x9-minmax-fp32-avx2-mul32.c
@@ -250,13 +250,14 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
__m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul16.c b/src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul16.c
index 5476ae8..603e448 100644
--- a/src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul16.c
+++ b/src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul16.c
@@ -322,6 +322,7 @@
vacc89ABCDEF =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul32.c b/src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul32.c
index a52e31a..5ae1cc2 100644
--- a/src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up16x9-minmax-gemmlowp-avx2-mul32.c
@@ -272,7 +272,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->gemmlowp_avx2.multiplier);
@@ -297,6 +296,8 @@
vacc01234567 =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c b/src/qs8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
index 08f6a46..59fa607 100644
--- a/src/qs8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up24x25-minmax-fp32-avx2-mul32.c
@@ -654,13 +654,14 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
__m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up24x25-minmax-gemmlowp-avx2-mul32.c b/src/qs8-dwconv/gen/up24x25-minmax-gemmlowp-avx2-mul32.c
index bf2dab7..7cfbf04 100644
--- a/src/qs8-dwconv/gen/up24x25-minmax-gemmlowp-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up24x25-minmax-gemmlowp-avx2-mul32.c
@@ -683,7 +683,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->gemmlowp_avx2.multiplier);
@@ -708,6 +707,8 @@
vacc01234567 =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c b/src/qs8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
index c10fa47..321245b 100644
--- a/src/qs8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up24x9-minmax-fp32-avx2-mul32.c
@@ -286,13 +286,14 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
__m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up24x9-minmax-gemmlowp-avx2-mul32.c b/src/qs8-dwconv/gen/up24x9-minmax-gemmlowp-avx2-mul32.c
index 1fe0d8d..1ede0fd 100644
--- a/src/qs8-dwconv/gen/up24x9-minmax-gemmlowp-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up24x9-minmax-gemmlowp-avx2-mul32.c
@@ -315,7 +315,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->gemmlowp_avx2.multiplier);
@@ -340,6 +339,8 @@
vacc01234567 =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c b/src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
index 5fbbc91..b09b3d6 100644
--- a/src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
+++ b/src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul16.c
@@ -808,7 +808,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod24x0123456789ABCDEF)));
vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod24x89ABCDEF));
- w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
k += 16;
__m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
@@ -821,6 +820,8 @@
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c b/src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
index f8017a2..c7b9a10 100644
--- a/src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up32x25-minmax-fp32-avx2-mul32.c
@@ -733,13 +733,14 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
__m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul16.c b/src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul16.c
index 7965487..ca64db2 100644
--- a/src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul16.c
+++ b/src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul16.c
@@ -844,7 +844,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod24x0123456789ABCDEF)));
vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod24x89ABCDEF));
- w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
k += 16;
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->gemmlowp_avx2.multiplier);
@@ -879,6 +878,8 @@
vacc89ABCDEF =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul32.c b/src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul32.c
index 3d51fd6..622df86 100644
--- a/src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up32x25-minmax-gemmlowp-avx2-mul32.c
@@ -769,7 +769,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->gemmlowp_avx2.multiplier);
@@ -794,6 +793,8 @@
vacc01234567 =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c b/src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
index 258a80b..5b8a3d8 100644
--- a/src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
+++ b/src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul16.c
@@ -344,7 +344,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
- w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
k += 16;
__m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
@@ -357,6 +356,8 @@
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c b/src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
index 5bde3a6..e338c43 100644
--- a/src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up32x9-minmax-fp32-avx2-mul32.c
@@ -317,13 +317,14 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
__m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul16.c b/src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul16.c
index 853b119..7eb8bee 100644
--- a/src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul16.c
+++ b/src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul16.c
@@ -380,7 +380,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_cvtepi16_epi32(_mm256_castsi256_si128(vprod8x0123456789ABCDEF)));
vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_cvtepi16_epi32(vprod8x89ABCDEF));
- w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
k += 16;
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->gemmlowp_avx2.multiplier);
@@ -415,6 +414,8 @@
vacc89ABCDEF =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod89ABCDEF, vshift), _mm256_cmpgt_epi32(vrem89ABCDEF, vremainder_threshold));
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc89ABCDEF), _mm256_extracti128_si256(vacc89ABCDEF, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul32.c b/src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul32.c
index cc5dcf2..ea903da 100644
--- a/src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up32x9-minmax-gemmlowp-avx2-mul32.c
@@ -353,7 +353,6 @@
vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->gemmlowp_avx2.multiplier);
@@ -378,6 +377,8 @@
vacc01234567 =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c b/src/qs8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
index a8af432..2dccd8b 100644
--- a/src/qs8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up8x25-minmax-fp32-avx2-mul32.c
@@ -470,6 +470,7 @@
vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up8x25-minmax-gemmlowp-avx2-mul32.c b/src/qs8-dwconv/gen/up8x25-minmax-gemmlowp-avx2-mul32.c
index 7734857..aa97f16 100644
--- a/src/qs8-dwconv/gen/up8x25-minmax-gemmlowp-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up8x25-minmax-gemmlowp-avx2-mul32.c
@@ -503,6 +503,7 @@
vacc01234567 =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c b/src/qs8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
index fb18b6c..1be7df8 100644
--- a/src/qs8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up8x9-minmax-fp32-avx2-mul32.c
@@ -214,6 +214,7 @@
vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
diff --git a/src/qs8-dwconv/gen/up8x9-minmax-gemmlowp-avx2-mul32.c b/src/qs8-dwconv/gen/up8x9-minmax-gemmlowp-avx2-mul32.c
index 5e039fe..dde0ddb 100644
--- a/src/qs8-dwconv/gen/up8x9-minmax-gemmlowp-avx2-mul32.c
+++ b/src/qs8-dwconv/gen/up8x9-minmax-gemmlowp-avx2-mul32.c
@@ -247,6 +247,7 @@
vacc01234567 =
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod01234567, vshift), _mm256_cmpgt_epi32(vrem01234567, vremainder_threshold));
+
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->gemmlowp_avx2.output_zero_point);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);
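
Note on the recurring hunk pattern in the generated QS8 kernels above: moving the `w` pointer bump from right after the last multiply-accumulate to after requantization does not change QS8 behavior; it keeps the generated sources in sync with the shared templates below, where the channelwise (QC8) variant must read its per-channel scales relative to the still-unadvanced `w`. A minimal sketch of that constraint; the helper name and the 8-channel, 9-tap tile shape are illustrative, not from this change:

    #include <immintrin.h>
    #include <stdint.h>

    /* Hypothetical 8-channel FP32 requantization tail; w points at the start of a
     * packed tile laid out as [8 x int32 bias | 8*9 x int8 kernel | 8 x float scale]. */
    static __m256i requantize_tail(__m256i vacc, const void** w_ptr) {
      const void* w = *w_ptr;
      __m256 vscaled = _mm256_cvtepi32_ps(vacc);
      /* The per-channel scales sit at a fixed offset from the *unadvanced* w,
       * so this load must precede the pointer bump past the bias block. */
      const __m256 vscale = _mm256_loadu_ps(
          (const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * 9 * sizeof(int8_t)));
      vscaled = _mm256_mul_ps(vscaled, vscale);
      vacc = _mm256_cvtps_epi32(vscaled);
      *w_ptr = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));  /* now safe to advance */
      return vacc;
    }
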
diff --git a/src/qs8-dwconv/unipass-avx2-mul16.c.in b/src/qs8-dwconv/unipass-avx2-mul16.c.in
index e01b67e..d863bad 100644
--- a/src/qs8-dwconv/unipass-avx2-mul16.c.in
+++ b/src/qs8-dwconv/unipass-avx2-mul16.c.in
@@ -5,6 +5,7 @@
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert REQUANTIZATION in ["GEMMLOWP", "FP32"]
+$assert not CHANNELWISE or REQUANTIZATION == "FP32"
$assert CHANNEL_TILE % 16 == 0
$assert CHANNEL_TILE >= 16
$assert KERNEL_TILE >= 2
@@ -15,7 +16,10 @@
#include <xnnpack/dwconv.h>
-void xnn_qs8_dwconv_minmax_${REQUANTIZATION.lower()}_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__avx2_mul16(
+$DATATYPE = "qc8" if CHANNELWISE else "qs8"
+$PARAMS_STRUCT = "avx2" if CHANNELWISE else REQUANTIZATION.lower() + "_avx2"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+void xnn_${DATATYPE}_dwconv_minmax_${REQUANTIZATION.lower()}_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__avx2_mul16(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -25,7 +29,7 @@
size_t output_increment,
size_t input_offset,
const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
assert(channels != 0);
assert(output_width != 0);
@@ -98,22 +102,30 @@
$for C in range(0, CHANNEL_TILE, 8):
__m256 vscaled${ABC[C:C+8]} = _mm256_cvtepi32_ps(vacc${ABC[C:C+8]});
- const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
- $for C in range(0, CHANNEL_TILE, 8):
- vscaled${ABC[C:C+8]} = _mm256_mul_ps(vscaled${ABC[C:C+8]}, vscale);
+ $if CHANNELWISE:
+ const __m256 vscale${ABC[0:8]} = _mm256_loadu_ps((const float*) w);
+ $for C in range(8, CHANNEL_TILE, 8):
+ const __m256 vscale${ABC[C:C+8]} = _mm256_loadu_ps((const float*) ((uintptr_t) w + ${C} * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(float));
+ $for C in range(0, CHANNEL_TILE, 8):
+ vscaled${ABC[C:C+8]} = _mm256_mul_ps(vscaled${ABC[C:C+8]}, vscale${ABC[C:C+8]});
+ $else:
+ const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
+ $for C in range(0, CHANNEL_TILE, 8):
+ vscaled${ABC[C:C+8]} = _mm256_mul_ps(vscaled${ABC[C:C+8]}, vscale);
$for C in range(0, CHANNEL_TILE, 8):
vacc${ABC[C:C+8]} = _mm256_cvtps_epi32(vscaled${ABC[C:C+8]});
- const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->${REQUANTIZATION.lower()}_avx2.output_zero_point);
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->${PARAMS_STRUCT}.output_zero_point);
$for C in range(0, CHANNEL_TILE, 16):
const __m256i vout${ABC[C:C+4]}${ABC[C+8:C+12]}${ABC[C+4:C+8]}${ABC[C+12:C+16]} = _mm256_adds_epi16(_mm256_packs_epi32(vacc${ABC[C:C+8]}, vacc${ABC[C+8:C+16]}), voutput_zero_point);
$for C in range(0, CHANNEL_TILE, 16):
__m128i vout${ABC[C:C+16]} = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout${ABC[C:C+4]}${ABC[C+8:C+12]}${ABC[C+4:C+8]}${ABC[C+12:C+16]}), _mm256_extracti128_si256(vout${ABC[C:C+4]}${ABC[C+8:C+12]}${ABC[C+4:C+8]}${ABC[C+12:C+16]}, 1)), _MM_SHUFFLE(3, 1, 2, 0));
- const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_min);
- const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);
$for C in range(0, CHANNEL_TILE, 16):
vout${ABC[C:C+16]} = _mm_max_epi8(vout${ABC[C:C+16]}, voutput_min);
vout${ABC[C:C+16]} = _mm_min_epi8(vout${ABC[C:C+16]}, voutput_max);
@@ -149,7 +161,6 @@
vacc${ABC[8:16]} = _mm256_add_epi32(vacc${ABC[8:16]}, _mm256_cvtepi16_epi32(vprod${K}x${ABC[8:16]}));
$if CHANNEL_TILE > 16:
- w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
k += 16;
$if REQUANTIZATION == "GEMMLOWP":
@@ -188,19 +199,28 @@
__m256 vscaled${ABC[0:8]} = _mm256_cvtepi32_ps(vacc${ABC[0:8]});
__m256 vscaled${ABC[8:16]} = _mm256_cvtepi32_ps(vacc${ABC[8:16]});
- const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
- vscaled${ABC[0:8]} = _mm256_mul_ps(vscaled${ABC[0:8]}, vscale);
- vscaled${ABC[8:16]} = _mm256_mul_ps(vscaled${ABC[8:16]}, vscale);
+ $if CHANNELWISE:
+ const __m256 vscale${ABC[0:8]} = _mm256_loadu_ps((const float*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${CHANNEL_TILE * KERNEL_TILE} * sizeof(int8_t)));
+ const __m256 vscale${ABC[8:16]} = _mm256_loadu_ps((const float*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${CHANNEL_TILE * KERNEL_TILE} * sizeof(int8_t) + 8 * sizeof(float)));
+ vscaled${ABC[0:8]} = _mm256_mul_ps(vscaled${ABC[0:8]}, vscale${ABC[0:8]});
+ vscaled${ABC[8:16]} = _mm256_mul_ps(vscaled${ABC[8:16]}, vscale${ABC[8:16]});
+ $else:
+ const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
+ vscaled${ABC[0:8]} = _mm256_mul_ps(vscaled${ABC[0:8]}, vscale);
+ vscaled${ABC[8:16]} = _mm256_mul_ps(vscaled${ABC[8:16]}, vscale);
vacc${ABC[0:8]} = _mm256_cvtps_epi32(vscaled${ABC[0:8]});
vacc${ABC[8:16]} = _mm256_cvtps_epi32(vscaled${ABC[8:16]});
- const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_zero_point);
+ $if CHANNEL_TILE > 16:
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
__m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), voutput_zero_point);
__m128i vout${ABC[8:16]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[8:16]}), _mm256_extracti128_si256(vacc${ABC[8:16]}, 1)), voutput_zero_point);
- const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_min);
- const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);
__m128i vout${ABC[0:16]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[8:16]});
vout${ABC[0:16]} = _mm_min_epi8(_mm_max_epi8(vout${ABC[0:16]}, voutput_min), voutput_max);
diff --git a/src/qs8-dwconv/unipass-avx2-mul32.c.in b/src/qs8-dwconv/unipass-avx2-mul32.c.in
index 8f6d839..80ca4cf 100644
--- a/src/qs8-dwconv/unipass-avx2-mul32.c.in
+++ b/src/qs8-dwconv/unipass-avx2-mul32.c.in
@@ -5,6 +5,7 @@
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert REQUANTIZATION in ["GEMMLOWP", "FP32"]
+$assert not CHANNELWISE or REQUANTIZATION == "FP32"
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert KERNEL_TILE >= 2
@@ -15,7 +16,10 @@
#include <xnnpack/dwconv.h>
-void xnn_qs8_dwconv_minmax_${REQUANTIZATION.lower()}_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__avx2_mul32(
+$DATATYPE = "qc8" if CHANNELWISE else "qs8"
+$PARAMS_STRUCT = "avx2" if CHANNELWISE else REQUANTIZATION.lower() + "_avx2"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+void xnn_${DATATYPE}_dwconv_minmax_${REQUANTIZATION.lower()}_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__avx2_mul32(
size_t channels,
size_t output_width,
const int8_t** input,
@@ -25,7 +29,7 @@
size_t output_increment,
size_t input_offset,
const int8_t* zero,
- const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
assert(channels != 0);
assert(output_width != 0);
@@ -96,17 +100,25 @@
$for C in range(0, CHANNEL_TILE, 8):
__m256 vscaled${ABC[C:C+8]} = _mm256_cvtepi32_ps(vacc${ABC[C:C+8]});
- const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
- $for C in range(0, CHANNEL_TILE, 8):
- vscaled${ABC[C:C+8]} = _mm256_mul_ps(vscaled${ABC[C:C+8]}, vscale);
+ $if CHANNELWISE:
+ const __m256 vscale${ABC[0:8]} = _mm256_loadu_ps((const float*) w);
+ $for C in range(8, CHANNEL_TILE, 8):
+ const __m256 vscale${ABC[C:C+8]} = _mm256_loadu_ps((const float*) ((uintptr_t) w + ${C} * sizeof(float)));
+ w = (const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(float));
+ $for C in range(0, CHANNEL_TILE, 8):
+ vscaled${ABC[C:C+8]} = _mm256_mul_ps(vscaled${ABC[C:C+8]}, vscale${ABC[C:C+8]});
+ $else:
+ const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
+ $for C in range(0, CHANNEL_TILE, 8):
+ vscaled${ABC[C:C+8]} = _mm256_mul_ps(vscaled${ABC[C:C+8]}, vscale);
$for C in range(0, CHANNEL_TILE, 8):
vacc${ABC[C:C+8]} = _mm256_cvtps_epi32(vscaled${ABC[C:C+8]});
$if CHANNEL_TILE > 8:
- const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->${REQUANTIZATION.lower()}_avx2.output_zero_point);
+ const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->${PARAMS_STRUCT}.output_zero_point);
$else:
- const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_zero_point);
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
__m256i vout${ABC[C:C+4]}${ABC[C+8:C+12]}${ABC[C+4:C+8]}${ABC[C+12:C+16]} = _mm256_adds_epi16(_mm256_packs_epi32(vacc${ABC[C:C+8]}, vacc${ABC[C+8:C+16]}), voutput_zero_point);
@@ -121,8 +133,8 @@
$else:
__m128i vout${ABC[C:C+8]}${ABC[C:C+8]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C:C+8]});
- const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_min);
- const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);
$for C in range(0, CHANNEL_TILE, 16):
$if C + 8 < CHANNEL_TILE:
vout${ABC[C:C+16]} = _mm_max_epi8(vout${ABC[C:C+16]}, voutput_min);
@@ -164,7 +176,6 @@
vacc${ABC[0:8]} = _mm256_add_epi32(vacc${ABC[0:8]}, _mm256_mullo_epi32(vi${K}x${ABC[0:8]}, vk${K}x${ABC[0:8]}));
$if CHANNEL_TILE > 8:
- w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
k += 8;
$if REQUANTIZATION == "GEMMLOWP":
@@ -191,15 +202,22 @@
_mm256_sub_epi32(_mm256_sra_epi32(vq31prod${ABC[0:8]}, vshift), _mm256_cmpgt_epi32(vrem${ABC[0:8]}, vremainder_threshold));
$elif REQUANTIZATION == "FP32":
__m256 vscaled${ABC[0:8]} = _mm256_cvtepi32_ps(vacc${ABC[0:8]});
- vscaled${ABC[0:8]} = _mm256_mul_ps(vscaled${ABC[0:8]}, _mm256_load_ps(params->fp32_avx2.scale));
+ $if CHANNELWISE:
+ const __m256 vscale${ABC[0:8]} = _mm256_loadu_ps((const float*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${CHANNEL_TILE * KERNEL_TILE} * sizeof(int8_t)));
+ vscaled${ABC[0:8]} = _mm256_mul_ps(vscaled${ABC[0:8]}, vscale${ABC[0:8]});
+ $else:
+ vscaled${ABC[0:8]} = _mm256_mul_ps(vscaled${ABC[0:8]}, _mm256_load_ps(params->fp32_avx2.scale));
vacc${ABC[0:8]} = _mm256_cvtps_epi32(vscaled${ABC[0:8]});
- const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_zero_point);
+ $if CHANNEL_TILE > 8:
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
__m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc${ABC[0:8]}), _mm256_extracti128_si256(vacc${ABC[0:8]}, 1)), voutput_zero_point);
__m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});
- const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_max);
- const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${REQUANTIZATION.lower()}_avx2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
vout${ABC[0:8]}${ABC[0:8]} = _mm_min_epi8(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);
vout${ABC[0:8]}${ABC[0:8]} = _mm_max_epi8(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
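
Note: the CHANNELWISE offsets used by both unipass templates above assume the following per-channel-tile layout of the packed weights. The struct below is only a sketch for illustration (CR = 8 channels and KR = 9 taps are example values; no such struct exists in the source):

    #include <stdint.h>

    #define CR 8   /* channel tile, example value */
    #define KR 9   /* kernel tile, example value */

    /* One channel-tile block of QC8 packed weights, as the templates address it. */
    struct qc8_dwconv_packed_tile_sketch {
      int32_t bias[CR];        /* w points here on entry to the tile        */
      int8_t  kernel[CR * KR]; /* kernel taps for this tile                 */
      float   scale[CR];       /* per-channel scales, in the reserved space */
    };
    /* For these sizes the scale offset is CR*sizeof(int32_t) + CR*KR*sizeof(int8_t)
     * = 32 + 72 = 104 bytes, matching the remainder-path loads above; in the main
     * loop w has already advanced past bias and kernel, so scales are read at w
     * and w is then advanced by CR*sizeof(float). */
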
diff --git a/src/qs8-gemm/MRx16c8-avx512skx.c.in b/src/qs8-gemm/MRx16c8-avx512skx.c.in
index 41e5840..3a3a523 100644
--- a/src/qs8-gemm/MRx16c8-avx512skx.c.in
+++ b/src/qs8-gemm/MRx16c8-avx512skx.c.in
@@ -19,7 +19,7 @@
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = "avx512" if CHANNELWISE else REQUANTIZATION.lower() + "_avx512"
$GEMM_SUFFIX = "_xw" if VARIANT == "EXTENDED" else ""
-$GEMM_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
void xnn_${DATATYPE}_gemm${GEMM_SUFFIX}_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x16c8__avx512skx(
size_t mr,
size_t nc,
@@ -30,7 +30,7 @@
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
- const union ${GEMM_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
assert(mr != 0);
assert(mr <= ${MR});
diff --git a/src/qs8-gemm/MRx4c2-sse.c.in b/src/qs8-gemm/MRx4c2-sse.c.in
index 2d9ea8a..03d4d8d 100644
--- a/src/qs8-gemm/MRx4c2-sse.c.in
+++ b/src/qs8-gemm/MRx4c2-sse.c.in
@@ -32,7 +32,7 @@
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$LOAD_SUFFIX = {"LD128": "_ld128", "LD64": "_ld64", "EXTENDED": ""}[VARIANT]
$GEMM_SUFFIX = "_xw" if VARIANT == "EXTENDED" else ""
-$GEMM_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
$PARAMS_STRUCT = ("" if CHANNELWISE else REQUANTIZATION.lower() + "_") + ("sse4" if SSE >= 4 else "sse2")
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_${DATATYPE}_gemm${GEMM_SUFFIX}_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x4c2__${ISA}${LOAD_SUFFIX}(
@@ -45,7 +45,7 @@
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
- const union ${GEMM_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
assert(mr != 0);
assert(mr <= ${MR});
diff --git a/src/qs8-gemm/MRx4c8-sse.c.in b/src/qs8-gemm/MRx4c8-sse.c.in
index d50aff8..48b34d2 100644
--- a/src/qs8-gemm/MRx4c8-sse.c.in
+++ b/src/qs8-gemm/MRx4c8-sse.c.in
@@ -30,7 +30,7 @@
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$LOAD_SUFFIX = {"LD128": "_ld128", "LD64": "_ld64", "EXTENDED": ""}[VARIANT]
$GEMM_SUFFIX = "_xw" if VARIANT == "EXTENDED" else ""
-$GEMM_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
$PARAMS_STRUCT = ("" if CHANNELWISE else REQUANTIZATION.lower() + "_") + ("sse4" if SSE >= 4 else "sse2")
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_${DATATYPE}_gemm${GEMM_SUFFIX}_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x4c8__${ISA}${LOAD_SUFFIX}(
@@ -43,7 +43,7 @@
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
- const union ${GEMM_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
assert(mr != 0);
assert(mr <= ${MR});
diff --git a/src/qs8-gemm/MRx8c8-avx2.c.in b/src/qs8-gemm/MRx8c8-avx2.c.in
index c0f0da9..e1c753a 100644
--- a/src/qs8-gemm/MRx8c8-avx2.c.in
+++ b/src/qs8-gemm/MRx8c8-avx2.c.in
@@ -19,7 +19,7 @@
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = "avx2" if CHANNELWISE else REQUANTIZATION.lower() + "_avx2"
$GEMM_SUFFIX = "_xw" if VARIANT == "EXTENDED" else ""
-$GEMM_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
void xnn_${DATATYPE}_gemm${GEMM_SUFFIX}_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x8c8__avx2(
size_t mr,
size_t nc,
@@ -30,7 +30,7 @@
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
- const union ${GEMM_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
assert(mr != 0);
assert(mr <= ${MR});
diff --git a/src/qs8-igemm/MRx16c8-avx512skx.c.in b/src/qs8-igemm/MRx16c8-avx512skx.c.in
index c2e4ffe..a32ef2a 100644
--- a/src/qs8-igemm/MRx16c8-avx512skx.c.in
+++ b/src/qs8-igemm/MRx16c8-avx512skx.c.in
@@ -19,7 +19,7 @@
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = "avx512" if CHANNELWISE else REQUANTIZATION.lower() + "_avx512"
$GEMM_SUFFIX = "_xw" if VARIANT == "EXTENDED" else ""
-$GEMM_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
void xnn_${DATATYPE}_igemm${GEMM_SUFFIX}_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x16c8__avx512skx(
size_t mr,
size_t nc,
@@ -32,7 +32,7 @@
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
- const union ${GEMM_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
assert(mr != 0);
assert(mr <= ${MR});
diff --git a/src/qs8-igemm/MRx4c2-sse.c.in b/src/qs8-igemm/MRx4c2-sse.c.in
index 84b2b8c..82b75bc 100644
--- a/src/qs8-igemm/MRx4c2-sse.c.in
+++ b/src/qs8-igemm/MRx4c2-sse.c.in
@@ -29,7 +29,7 @@
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
-$GEMM_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
$PARAMS_STRUCT = ("" if CHANNELWISE else REQUANTIZATION.lower() + "_") + ("sse4" if SSE >= 4 else "sse2")
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x4c2__${ISA}_${VARIANT.lower()}(
@@ -44,7 +44,7 @@
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
- const union ${GEMM_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
assert(mr != 0);
assert(mr <= ${MR});
diff --git a/src/qs8-igemm/MRx4c8-sse.c.in b/src/qs8-igemm/MRx4c8-sse.c.in
index 9b9e868..b9affd7 100644
--- a/src/qs8-igemm/MRx4c8-sse.c.in
+++ b/src/qs8-igemm/MRx4c8-sse.c.in
@@ -28,7 +28,7 @@
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
-$GEMM_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
$PARAMS_STRUCT = ("" if CHANNELWISE else REQUANTIZATION.lower() + "_") + ("sse4" if SSE >= 4 else "sse2")
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x4c8__${ISA}_${VARIANT.lower()}(
@@ -43,7 +43,7 @@
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
- const union ${GEMM_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
assert(mr != 0);
assert(mr <= ${MR});
diff --git a/src/qs8-igemm/MRx8c8-avx2.c.in b/src/qs8-igemm/MRx8c8-avx2.c.in
index 40f2ef7..58367d0 100644
--- a/src/qs8-igemm/MRx8c8-avx2.c.in
+++ b/src/qs8-igemm/MRx8c8-avx2.c.in
@@ -17,7 +17,7 @@
$DATATYPE = "qc8" if CHANNELWISE else "qs8"
$PARAMS_STRUCT = "avx2" if CHANNELWISE else REQUANTIZATION.lower() + "_avx2"
-$GEMM_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
+$CONV_PARAMS = "xnn_qs8_minmax_params" if CHANNELWISE else "xnn_qs8_conv_minmax_params"
void xnn_${DATATYPE}_igemm_minmax_${REQUANTIZATION.lower()}_ukernel_${MR}x8c8__avx2(
size_t mr,
size_t nc,
@@ -30,7 +30,7 @@
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
- const union ${GEMM_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
+ const union ${CONV_PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN XNN_DISABLE_MSAN
{
assert(mr != 0);
assert(mr <= ${MR});
diff --git a/src/xnnpack/dwconv.h b/src/xnnpack/dwconv.h
index e71cda7..3dc3868 100644
--- a/src/xnnpack/dwconv.h
+++ b/src/xnnpack/dwconv.h
@@ -465,6 +465,28 @@
DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_gemmlowp_ukernel_up4x25__scalar)
+#define DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
+ XNN_INTERNAL void fn_name( \
+ size_t channels, \
+ size_t output_width, \
+ const int8_t** input, \
+ const void* weights, \
+ int8_t* output, \
+ size_t input_stride, \
+ size_t output_increment, \
+ size_t input_offset, \
+ const int8_t* zero, \
+ const union xnn_qs8_minmax_params* params);
+
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x9__avx2_mul16)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x9__avx2_mul16)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16)
+
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x9__avx2_mul32)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x9__avx2_mul32)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x9__avx2_mul32)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x9__avx2_mul32)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32)
+DECLARE_QC8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32)
+
+
#define DECLARE_F32_DWCONV2D_CHW_MINMAX_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t input_height, \
diff --git a/src/xnnpack/pack.h b/src/xnnpack/pack.h
index 06dd8e8..c0b223b 100644
--- a/src/xnnpack/pack.h
+++ b/src/xnnpack/pack.h
@@ -369,6 +369,7 @@
const void* k,
const void* b,
void* packed_w,
+ size_t extra_bytes,
const void* params);
XNN_INTERNAL void xnn_pack_f32_dwconv_ghw_w(
@@ -379,6 +380,7 @@
const float* k,
const float* b,
float* packed_w,
+ size_t extra_bytes,
const void* params);
XNN_INTERNAL void xnn_pack_f16_dwconv_ghw_w(
@@ -389,6 +391,7 @@
const uint16_t* k,
const uint16_t* b,
uint16_t* packed_w,
+ size_t extra_bytes,
const void* params);
XNN_INTERNAL void xnn_pack_qu8_dwconv_ghw_w(
@@ -399,6 +402,7 @@
const uint8_t* k,
const int32_t* b,
void* packed_w,
+ size_t extra_bytes,
const struct xnn_qu8_packing_params* params);
XNN_INTERNAL void xnn_pack_qs8_dwconv_ghw_w(
@@ -409,6 +413,7 @@
const int8_t* k,
const int32_t* b,
void* packed_w,
+ size_t extra_bytes,
const struct xnn_qs8_packing_params* params);
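
A sketch of how the extended packing signature is meant to be used for QC8, mirroring the tester code later in this diff; the helper name and the cr = 8, kr = 9 tile sizes are illustrative, and this assumes xnn_init_qc8_scale_fp32_params is declared alongside the packing functions:

    #include <stddef.h>
    #include <stdint.h>
    #include <xnnpack/pack.h>

    /* Hypothetical helper: pack one QC8 DWCONV weight blob for cr = 8, kr = 9. */
    static void pack_qc8_dwconv_weights(
        size_t channels, const int8_t* kernel, const int32_t* bias,
        const float* scale, int8_t input_zero_point, void* packed_weights) {
      const size_t cr = 8, kr = 9;
      const struct xnn_qs8_packing_params packing_params = { input_zero_point };
      /* Reserve cr * sizeof(float) extra bytes per channel tile... */
      xnn_pack_qs8_dwconv_ghw_w(
          kr, 1 /* kernel width */, channels, cr,
          kernel, bias, packed_weights,
          cr * sizeof(float) /* extra bytes */, &packing_params);
      /* ...then fill the reserved space with the per-channel scales. */
      xnn_init_qc8_scale_fp32_params(
          channels, cr,
          cr * (kr * sizeof(int8_t) + sizeof(int32_t) + sizeof(float)) /* tile stride */,
          scale,
          (void*) ((uintptr_t) packed_weights + cr * (kr * sizeof(int8_t) + sizeof(int32_t))));
    }
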
diff --git a/src/xnnpack/params.h b/src/xnnpack/params.h
index 4062602..45a13f3 100644
--- a/src/xnnpack/params.h
+++ b/src/xnnpack/params.h
@@ -1139,17 +1139,17 @@
const void* zero,
const struct xnn_f16_minmax_params* params);
-typedef void (*xnn_qu8_dwconv_minmax_unipass_ukernel_function)(
+typedef void (*xnn_qc8_dwconv_minmax_unipass_ukernel_function)(
size_t channels,
size_t output_width,
- const uint8_t** input,
+ const int8_t** input,
const void* weights,
- uint8_t* output,
+ int8_t* output,
size_t input_stride,
size_t output_increment,
size_t input_offset,
- const uint8_t* zero,
- const union xnn_qu8_conv_minmax_params* params);
+ const int8_t* zero,
+ const union xnn_qs8_minmax_params* params);
typedef void (*xnn_qs8_dwconv_minmax_unipass_ukernel_function)(
size_t channels,
@@ -1163,6 +1163,18 @@
const int8_t* zero,
const union xnn_qs8_conv_minmax_params* params);
+typedef void (*xnn_qu8_dwconv_minmax_unipass_ukernel_function)(
+ size_t channels,
+ size_t output_width,
+ const uint8_t** input,
+ const void* weights,
+ uint8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const uint8_t* zero,
+ const union xnn_qu8_conv_minmax_params* params);
+
typedef void (*xnn_dwconv_multipass_ukernel_function)(
size_t channels,
size_t output_width,
diff --git a/test/dwconv-microkernel-tester.h b/test/dwconv-microkernel-tester.h
index c13787f..8862114 100644
--- a/test/dwconv-microkernel-tester.h
+++ b/test/dwconv-microkernel-tester.h
@@ -195,7 +195,8 @@
const xnn_qu8_packing_params packing_params = { input_zero_point(), kernel_zero_point() };
xnn_pack_qu8_dwconv_ghw_w(
kr(), 1, channels(), cr(),
- kernel.data(), bias.data(), packed_weights.data(), &packing_params);
+ kernel.data(), bias.data(), packed_weights.data(),
+ 0 /* extra bytes */, &packing_params);
for (size_t i = 0; i < indirection.size(); i++) {
indirection[i] = input.data() + i * channels() - input_offset();
}
@@ -270,6 +271,129 @@
}
void Test(
+ xnn_qc8_dwconv_minmax_unipass_ukernel_function dwconv_minmax,
+ xnn_init_qs8_minmax_params_fn init_params,
+ xnn_init_qs8_requantization_params_fn init_requantization_params,
+ xnn_qs8_requantize_fn requantize) const
+ {
+ std::random_device random_device;
+ auto rng = std::mt19937(random_device());
+ auto i32rng = std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
+ auto i8rng = std::bind(
+      std::uniform_int_distribution<int32_t>(std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()), rng);
+
+ std::vector<const int8_t*> indirection((width() - 1) * step() + kr());
+ std::vector<int8_t> input(XNN_EXTRA_BYTES / sizeof(int8_t) + indirection.size() * channels());
+ std::vector<int8_t> kernel(channels() * kr());
+ std::vector<int32_t> bias(channels());
+ std::vector<int8_t, AlignedAllocator<int8_t, 64>> packed_weights((kr() + (sizeof(int32_t) + sizeof(float)) / sizeof(int8_t)) * packed_channels());
+ std::vector<int8_t> zero(channels() + XNN_EXTRA_BYTES / sizeof(int8_t));
+ std::vector<int8_t> output((width() - 1) * output_stride() + channels());
+ std::vector<int32_t> accumulators(width() * channels());
+ std::vector<float> scale(channels());
+ std::vector<int8_t> output_ref(width() * channels());
+
+ for (size_t iteration = 0; iteration < iterations(); iteration++) {
+ do {
+ std::generate(input.begin(), input.end(), std::ref(i8rng));
+ } while (input.size() > 1 && *std::max_element(input.cbegin(), input.cend()) == *std::min_element(input.cbegin(), input.cend()));
+ do {
+ std::generate(kernel.begin(), kernel.end(), std::ref(i8rng));
+ } while (kernel.size() > 1 && *std::max_element(kernel.cbegin(), kernel.cend()) == *std::min_element(kernel.cbegin(), kernel.cend()));
+ std::generate(bias.begin(), bias.end(), std::ref(i32rng));
+ std::fill(zero.begin(), zero.end(), int8_t(input_zero_point() - 0x80));
+ std::fill(output.begin(), output.end(), 0xA5);
+
+ std::fill(packed_weights.begin(), packed_weights.end(), 0);
+ const xnn_qs8_packing_params packing_params = { int8_t(input_zero_point() - 0x80) };
+ xnn_pack_qs8_dwconv_ghw_w(
+ kr(), 1, channels(), cr(),
+ kernel.data(), bias.data(), packed_weights.data(), cr() * sizeof(float),
+ &packing_params);
+ for (size_t i = 0; i < indirection.size(); i++) {
+ indirection[i] = input.data() + i * channels() - input_offset();
+ }
+ std::shuffle(indirection.begin(), indirection.end(), rng);
+ if (zero_index() != SIZE_MAX) {
+ for (size_t i = 0; i < indirection.size(); i += kr()) {
+ indirection[i + zero_index()] = zero.data();
+ }
+ }
+
+ // Compute reference results, without renormalization.
+ for (size_t x = 0; x < width(); x++) {
+ for (size_t c = 0; c < channels(); c++) {
+          int32_t acc = bias[c];
+ for (size_t k = 0; k < kr(); k++) {
+ if (indirection[x * step() + k] != zero.data()) {
+ acc +=
+ (int32_t(indirection[x * step() + k][c + input_offset()]) - int32_t(input_zero_point() - 0x80)) *
+ int32_t(kernel[c * kr() + k]);
+ }
+ }
+ accumulators[x * channels() + c] = acc;
+ }
+ }
+
+ // Compute renormalization parameters.
+ const int8_t output_zero_point = -1;
+ for (size_t c = 0; c < channels(); c++) {
+ int32_t accumulated_min = accumulators[c];
+ int32_t accumulated_max = accumulators[c];
+ for (size_t x = 0; x < width(); x++) {
+          accumulated_min = std::min(accumulated_min, accumulators[x * channels() + c]);
+          accumulated_max = std::max(accumulated_max, accumulators[x * channels() + c]);
+ }
+ const uint32_t accumulated_range = uint32_t(accumulated_max - accumulated_min);
+ const float output_scale = accumulated_range >= 256 ? double(accumulated_range) / 255.0 : 1.00001;
+ scale[c] = 1.0f / output_scale;
+ }
+ xnn_init_qc8_scale_fp32_params(
+ channels(), cr(),
+ cr() * (kr() * sizeof(int8_t) + sizeof(int32_t) + sizeof(float)), scale.data(),
+ (void*) ((uintptr_t) packed_weights.data() + cr() * (kr() * sizeof(int8_t) + sizeof(int32_t))));
+
+ // Prepare parameters.
+ union xnn_qs8_minmax_params minmax_params;
+ init_params(&minmax_params,
+ output_zero_point, int8_t(qmin() - 0x80), int8_t(qmax() - 0x80));
+ std::vector<xnn_qs8_requantization_params> requantization_params(channels());
+ for (size_t c = 0; c < channels(); c++) {
+ init_requantization_params(&requantization_params[c],
+ scale[c], output_zero_point, int8_t(qmin() - 0x80), int8_t(qmax() - 0x80));
+ }
+
+ // Renormalize reference results.
+ for (size_t x = 0; x < width(); x++) {
+ for (size_t c = 0; c < channels(); c++) {
+ output_ref[x * channels() + c] = requantize(accumulators[x * channels() + c], &requantization_params[c]);
+ }
+ }
+
+ // Call optimized micro-kernel.
+ dwconv_minmax(
+ channels(), width(),
+ indirection.data(), packed_weights.data(), output.data(),
+ step() * sizeof(void*),
+ (output_stride() - channels()) * sizeof(int8_t),
+ input_offset() * sizeof(int8_t), zero.data(),
+ &minmax_params);
+
+ // Verify results.
+ for (size_t x = 0; x < width(); x++) {
+ for (size_t c = 0; c < channels(); c++) {
+ ASSERT_GE(int32_t(output[x * output_stride() + c]), int32_t(qmin()) - 0x80)
+ << "x = " << x << ", channel = " << c;
+ ASSERT_LE(int32_t(output[x * output_stride() + c]), int32_t(qmax()) - 0x80)
+ << "x = " << x << ", channel = " << c;
+ ASSERT_EQ(int32_t(output[x * output_stride() + c]), int32_t(output_ref[x * channels() + c]))
+ << "x = " << x << ", channel = " << c << ", accumulator = " << accumulators[x * channels() + c];
+ }
+ }
+ }
+ }
+
+ void Test(
xnn_qs8_dwconv_minmax_unipass_ukernel_function dwconv_minmax,
xnn_init_qs8_conv_minmax_params_fn init_params,
xnn_init_qs8_requantization_params_fn init_requantization_params,
@@ -306,7 +430,8 @@
const xnn_qs8_packing_params packing_params = { int8_t(input_zero_point() - 0x80) };
xnn_pack_qs8_dwconv_ghw_w(
kr(), 1, channels(), cr(),
- kernel.data(), bias.data(), packed_weights.data(), &packing_params);
+ kernel.data(), bias.data(), packed_weights.data(),
+ 0 /* extra bytes */, &packing_params);
for (size_t i = 0; i < indirection.size(); i++) {
indirection[i] = input.data() + i * channels() - input_offset();
}
@@ -406,7 +531,8 @@
std::fill(packed_weights.begin(), packed_weights.end(), 0);
xnn_pack_f16_dwconv_ghw_w(
kr(), 1, channels(), cr(),
- kernel.data(), bias.data(), packed_weights.data(), nullptr);
+ kernel.data(), bias.data(), packed_weights.data(),
+ 0 /* extra bytes */, nullptr);
for (size_t i = 0; i < indirection.size(); i++) {
indirection[i] = input.data() + i * channels() - input_offset();
}
@@ -496,7 +622,8 @@
std::fill(packed_weights.begin(), packed_weights.end(), 0.0f);
xnn_pack_f32_dwconv_ghw_w(
kr(), 1, channels(), cr(),
- kernel.data(), bias.data(), packed_weights.data(), nullptr);
+ kernel.data(), bias.data(), packed_weights.data(),
+ 0 /* extra bytes */, nullptr);
for (size_t i = 0; i < indirection.size(); i++) {
indirection[i] = input.data() + i * channels() - input_offset();
}
@@ -567,7 +694,8 @@
std::fill(packed_weights.begin(), packed_weights.end(), 0.0f);
xnn_pack_f32_dwconv_ghw_w(
kr(), 1, channels(), cr(),
- kernel.data(), bias.data(), packed_weights.data(), nullptr);
+ kernel.data(), bias.data(), packed_weights.data(),
+ 0 /* extra bytes */, nullptr);
for (size_t i = 0; i < indirection.size(); i++) {
indirection[i] = input.data() + i * channels() - input_offset();
}
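
Note: the per-channel scale computed in the new Test() above is chosen so that each channel's accumulator range maps onto roughly the full int8 output range. A worked instance of the arithmetic; the concrete accumulator bounds are hypothetical:

    #include <stdint.h>

    /* Worked instance with hypothetical accumulator bounds for one channel. */
    static float example_channel_scale(void) {
      const int32_t accumulated_min = -1000;
      const int32_t accumulated_max = 1530;
      const uint32_t accumulated_range = (uint32_t) (accumulated_max - accumulated_min);  /* 2530 */
      /* 2530 / 255 ~= 9.92: the accumulator range maps onto ~255 int8 codes;
       * ranges below 256 get a scale of ~1.0 so small accumulators pass through. */
      const float output_scale =
          accumulated_range >= 256 ? (float) ((double) accumulated_range / 255.0) : 1.00001f;
      return 1.0f / output_scale;  /* ~= 0.1008; this is what gets packed after the kernel data */
    }
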
diff --git a/test/qc8-dwconv-minmax-fp32.cc b/test/qc8-dwconv-minmax-fp32.cc
new file mode 100644
index 0000000..214d4a0
--- /dev/null
+++ b/test/qc8-dwconv-minmax-fp32.cc
@@ -0,0 +1,1136 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+// All rights reserved.
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+// Specification: test/qc8-dwconv-minmax-fp32.yaml
+// Generator: tools/generate-dwconv-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/dwconv.h>
+#include "dwconv-microkernel-tester.h"
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, c_eq_16) {
+ TEST_REQUIRES_X86_AVX2;
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(16)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, c_div_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, c_div_16_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, c_div_16_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, c_lt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 1; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, c_gt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, c_gt_16_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, c_gt_16_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, multipixel) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ for (size_t step = 2; step <= 25; step++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(16)
+ .width(5)
+ .output_stride(83)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, input_offset) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .input_offset(304)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL16, zero) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t mz = 0; mz < 25; mz++) {
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .input_offset(304)
+ .zero_index(mz)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, c_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(32)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, c_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, c_div_32_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, c_div_32_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, c_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 1; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, c_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, c_gt_32_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, c_gt_32_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, multipixel) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ for (size_t step = 2; step <= 25; step++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+      .channels(channels)
+ .width(5)
+ .output_stride(163)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, input_offset) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .input_offset(592)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL16, zero) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t mz = 0; mz < 25; mz++) {
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .input_offset(592)
+ .zero_index(mz)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
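+// The guards operate at two levels: the #if blocks compile these tests only
+// for x86/x86-64 targets, while TEST_REQUIRES_X86_AVX2 additionally skips
+// each test at run time on CPUs without AVX2 support.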
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, c_eq_8) {
+ TEST_REQUIRES_X86_AVX2;
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(8)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, c_div_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, c_div_8_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, c_div_8_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, c_lt_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 1; channels < 8; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, c_gt_8) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, c_gt_8_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, c_gt_8_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, multipixel) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ for (size_t step = 2; step <= 25; step++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+      .channels(channels)
+ .width(5)
+ .output_stride(43)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, input_offset) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .input_offset(176)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP8X25__AVX2_MUL32, zero) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t mz = 0; mz < 25; mz++) {
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(25)
+ .channels(channels)
+ .input_offset(176)
+ .zero_index(mz)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
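+// For reference, a sketch of the unipass DWCONV contract these tests drive.
+// This mirrors the QS8 AVX2 counterparts; the exact params type and
+// qualifiers are assumptions here, not a definitive declaration:
+//
+//   void xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32(
+//       size_t channels,          // number of channels, >= 1
+//       size_t output_width,      // number of output pixels, >= 1
+//       const int8_t** input,     // indirection buffer: 25 row pointers per pixel
+//       const void* weights,      // packed per-tile weights
+//       int8_t* output,
+//       size_t input_stride,      // bytes to advance the indirection pointer per pixel
+//       size_t output_increment,  // extra bytes to advance output between pixels
+//       size_t input_offset,      // bytes added to every non-zero input pointer
+//       const int8_t* zero,       // shared zero buffer for padded taps
+//       const union xnn_qs8_minmax_params* params);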
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, c_eq_16) {
+ TEST_REQUIRES_X86_AVX2;
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(16)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, c_div_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, c_div_16_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, c_div_16_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, c_lt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 1; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, c_gt_16) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, c_gt_16_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, c_gt_16_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, multipixel) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ for (size_t step = 2; step <= 25; step++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+      .channels(channels)
+ .width(5)
+ .output_stride(83)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, input_offset) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .input_offset(304)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP16X25__AVX2_MUL32, zero) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t mz = 0; mz < 25; mz++) {
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(25)
+ .channels(channels)
+ .input_offset(304)
+ .zero_index(mz)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
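+// The fp32 suffix denotes float requantization: each 32-bit accumulator is
+// scaled by a per-channel float and rounded to the nearest integer before
+// clamping. A scalar sketch of the intended behavior (names hypothetical,
+// illustrative only):
+//
+//   int8_t requantize_fp32_example(int32_t acc, float scale,
+//                                  int32_t zero_point, int32_t qmin, int32_t qmax) {
+//     const int32_t rounded = (int32_t) lrintf((float) acc * scale);  // round-to-nearest-even
+//     int32_t out = rounded + zero_point;
+//     if (out < qmin) out = qmin;  // clamp to [qmin, qmax]
+//     if (out > qmax) out = qmax;
+//     return (int8_t) out;
+//   }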
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, c_eq_24) {
+ TEST_REQUIRES_X86_AVX2;
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(24)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, c_div_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, c_div_24_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, c_div_24_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, c_lt_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 1; channels < 24; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, c_gt_24) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, c_gt_24_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, c_gt_24_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, multipixel) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ for (size_t step = 2; step <= 25; step++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+      .channels(channels)
+ .width(5)
+ .output_stride(127)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, input_offset) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .input_offset(464)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP24X25__AVX2_MUL32, zero) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t mz = 0; mz < 25; mz++) {
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(25)
+ .channels(channels)
+ .input_offset(464)
+ .zero_index(mz)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
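+// The mul16/mul32 suffixes name the multiplication strategy rather than the
+// data type: mul16 variants keep products in 16-bit lanes (pairing
+// _mm256_mullo_epi16 with _mm256_mulhi_epi16 to reassemble 32-bit products),
+// while mul32 variants sign-extend inputs and weights to 32 bits and use
+// _mm256_mullo_epi32 directly. mul16 processes twice as many lanes per
+// multiply at the cost of extra interleaving, which is presumably why it is
+// only generated for the 16- and 32-channel tiles.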
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, c_eq_32) {
+ TEST_REQUIRES_X86_AVX2;
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(32)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, c_div_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, c_div_32_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, c_div_32_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, c_lt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 1; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, c_gt_32) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, c_gt_32_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, c_gt_32_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 33; channels < 64; channels++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, multipixel) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, multipixel_with_step) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ for (size_t step = 2; step <= 25; step++) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+      .channels(channels)
+ .width(5)
+ .output_stride(163)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_AVX2;
+ for (size_t channels = 1; channels <= 160; channels += 31) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, input_offset) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .input_offset(592)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+
+ TEST(QC8_DWCONV_MINMAX_FP32_UP32X25__AVX2_MUL32, zero) {
+ TEST_REQUIRES_X86_AVX2;
+ for (uint32_t mz = 0; mz < 25; mz++) {
+ for (uint32_t channels = 64; channels < 512; channels += 96) {
+ DWConvMicrokernelTester()
+ .cr(32)
+ .kr(25)
+ .channels(channels)
+ .input_offset(592)
+ .zero_index(mz)
+ .Test(xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32, xnn_init_qs8_minmax_avx2_params, xnn_init_qs8_requantization_fp32_params, xnn_qs8_requantize_fp32);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
diff --git a/test/qc8-dwconv-minmax-fp32.yaml b/test/qc8-dwconv-minmax-fp32.yaml
new file mode 100644
index 0000000..ad2f7c1
--- /dev/null
+++ b/test/qc8-dwconv-minmax-fp32.yaml
@@ -0,0 +1,17 @@
+# Copyright 2021 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul16
+ init: xnn_init_qs8_minmax_avx2_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul16
+ init: xnn_init_qs8_minmax_avx2_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32
+ init: xnn_init_qs8_minmax_avx2_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__avx2_mul32
+ init: xnn_init_qs8_minmax_avx2_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up24x25__avx2_mul32
+ init: xnn_init_qs8_minmax_avx2_params
+- name: xnn_qc8_dwconv_minmax_fp32_ukernel_up32x25__avx2_mul32
+ init: xnn_init_qs8_minmax_avx2_params
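For reference, these specs drive the test generator; the .cc file above is regenerated from the YAML rather than edited by hand. The invocation below is an assumption, mirroring how the neighboring DWCONV specs are handled in scripts/generate-tests.sh:

  tools/generate-dwconv-test.py --spec test/qc8-dwconv-minmax-fp32.yaml --output test/qc8-dwconv-minmax-fp32.cc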