Add NR=16 GEMM and IGEMM micro-kernels with AVX and FMA3 implementations
PiperOrigin-RevId: 284464344
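
Each new micro-kernel computes an MRx16 output tile: every k step broadcasts
one A element per row and multiplies it against two 8-lane ymm vectors of
packed weights, accumulating into 2*MR ymm registers (separate mul+add on
AVX, a single _mm256_fmadd_ps on FMA3). MR tops out at 5 here, presumably
because a 5x16 tile already occupies 10 of the 16 ymm registers before
counting the broadcast values and the two B vectors.

For orientation, a scalar reference for one full GEMMINC tile (a sketch, not
part of the change: strides are in elements, whereas the real kernels take
byte strides and also handle mr/nc remainders):

    #include <math.h>
    #include <stddef.h>

    // Scalar model of xnn_f32_gemminc_ukernel_MRx16__{avx,fma3}_broadcast
    // for a full 16-column tile.
    static void gemminc_mrx16_ref(
        size_t mr, size_t kc,              // kc counted in floats
        const float* a, size_t a_stride,   // mr rows of kc floats
        const float* w,                    // kc groups of 16 packed weights
        float* c, size_t cm_stride,
        const float* acc,                  // mr*16 initial partial sums
        float min, float max)              // output clamping bounds
    {
      for (size_t i = 0; i < mr; i++) {
        for (size_t j = 0; j < 16; j++) {
          float v = acc[i * 16 + j];       // GEMMINC resumes a partial sum
          for (size_t k = 0; k < kc; k++) {
            // The vector kernels broadcast a[i][k] across a ymm register
            // and multiply it by two 8-lane loads of w.
            v += a[i * a_stride + k] * w[k * 16 + j];
          }
          c[i * cm_stride + j] = fminf(fmaxf(v, min), max);
        }
      }
    }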
diff --git a/BUILD.bazel b/BUILD.bazel
index 676c071..1421c6a 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -766,16 +766,28 @@
"src/f32-gemm/gen/5x8-avx-broadcast.c",
"src/f32-gemm/gen/6x8-avx-broadcast.c",
"src/f32-gemm/gen/7x8-avx-broadcast.c",
+ "src/f32-gemm/gen/1x16-avx-broadcast.c",
+ "src/f32-gemm/gen/3x16-avx-broadcast.c",
+ "src/f32-gemm/gen/4x16-avx-broadcast.c",
+ "src/f32-gemm/gen/5x16-avx-broadcast.c",
"src/f32-gemm/gen-inc/1x8-avx-broadcast.c",
"src/f32-gemm/gen-inc/4x8-avx-broadcast.c",
"src/f32-gemm/gen-inc/5x8-avx-broadcast.c",
"src/f32-gemm/gen-inc/6x8-avx-broadcast.c",
"src/f32-gemm/gen-inc/7x8-avx-broadcast.c",
+ "src/f32-gemm/gen-inc/1x16-avx-broadcast.c",
+ "src/f32-gemm/gen-inc/3x16-avx-broadcast.c",
+ "src/f32-gemm/gen-inc/4x16-avx-broadcast.c",
+ "src/f32-gemm/gen-inc/5x16-avx-broadcast.c",
"src/f32-igemm/gen/1x8-avx-broadcast.c",
"src/f32-igemm/gen/4x8-avx-broadcast.c",
"src/f32-igemm/gen/5x8-avx-broadcast.c",
"src/f32-igemm/gen/6x8-avx-broadcast.c",
"src/f32-igemm/gen/7x8-avx-broadcast.c",
+ "src/f32-igemm/gen/1x16-avx-broadcast.c",
+ "src/f32-igemm/gen/3x16-avx-broadcast.c",
+ "src/f32-igemm/gen/4x16-avx-broadcast.c",
+ "src/f32-igemm/gen/5x16-avx-broadcast.c",
"src/f32-rmax/avx.c",
"src/f32-vscale/avx-unroll32.c",
]
@@ -799,18 +811,30 @@
"src/f32-gemm/gen/6x8-fma3-broadcast.c",
"src/f32-gemm/gen/7x8-fma3-broadcast.c",
"src/f32-gemm/gen/8x8-fma3-broadcast.c",
+ "src/f32-gemm/gen/1x16-fma3-broadcast.c",
+ "src/f32-gemm/gen/3x16-fma3-broadcast.c",
+ "src/f32-gemm/gen/4x16-fma3-broadcast.c",
+ "src/f32-gemm/gen/5x16-fma3-broadcast.c",
"src/f32-gemm/gen-inc/1x8-fma3-broadcast.c",
"src/f32-gemm/gen-inc/4x8-fma3-broadcast.c",
"src/f32-gemm/gen-inc/5x8-fma3-broadcast.c",
"src/f32-gemm/gen-inc/6x8-fma3-broadcast.c",
"src/f32-gemm/gen-inc/7x8-fma3-broadcast.c",
"src/f32-gemm/gen-inc/8x8-fma3-broadcast.c",
+ "src/f32-gemm/gen-inc/1x16-fma3-broadcast.c",
+ "src/f32-gemm/gen-inc/3x16-fma3-broadcast.c",
+ "src/f32-gemm/gen-inc/4x16-fma3-broadcast.c",
+ "src/f32-gemm/gen-inc/5x16-fma3-broadcast.c",
"src/f32-igemm/gen/1x8-fma3-broadcast.c",
"src/f32-igemm/gen/4x8-fma3-broadcast.c",
"src/f32-igemm/gen/5x8-fma3-broadcast.c",
"src/f32-igemm/gen/6x8-fma3-broadcast.c",
"src/f32-igemm/gen/7x8-fma3-broadcast.c",
"src/f32-igemm/gen/8x8-fma3-broadcast.c",
+ "src/f32-igemm/gen/1x16-fma3-broadcast.c",
+ "src/f32-igemm/gen/3x16-fma3-broadcast.c",
+ "src/f32-igemm/gen/4x16-fma3-broadcast.c",
+ "src/f32-igemm/gen/5x16-fma3-broadcast.c",
]
AVX2_UKERNELS = [
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 87d9fbf..7b0665a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -764,16 +764,28 @@
src/f32-gemm/gen/5x8-avx-broadcast.c
src/f32-gemm/gen/6x8-avx-broadcast.c
src/f32-gemm/gen/7x8-avx-broadcast.c
+ src/f32-gemm/gen/1x16-avx-broadcast.c
+ src/f32-gemm/gen/3x16-avx-broadcast.c
+ src/f32-gemm/gen/4x16-avx-broadcast.c
+ src/f32-gemm/gen/5x16-avx-broadcast.c
src/f32-gemm/gen-inc/1x8-avx-broadcast.c
src/f32-gemm/gen-inc/4x8-avx-broadcast.c
src/f32-gemm/gen-inc/5x8-avx-broadcast.c
src/f32-gemm/gen-inc/6x8-avx-broadcast.c
src/f32-gemm/gen-inc/7x8-avx-broadcast.c
+ src/f32-gemm/gen-inc/1x16-avx-broadcast.c
+ src/f32-gemm/gen-inc/3x16-avx-broadcast.c
+ src/f32-gemm/gen-inc/4x16-avx-broadcast.c
+ src/f32-gemm/gen-inc/5x16-avx-broadcast.c
src/f32-igemm/gen/1x8-avx-broadcast.c
src/f32-igemm/gen/4x8-avx-broadcast.c
src/f32-igemm/gen/5x8-avx-broadcast.c
src/f32-igemm/gen/6x8-avx-broadcast.c
src/f32-igemm/gen/7x8-avx-broadcast.c
+ src/f32-igemm/gen/1x16-avx-broadcast.c
+ src/f32-igemm/gen/3x16-avx-broadcast.c
+ src/f32-igemm/gen/4x16-avx-broadcast.c
+ src/f32-igemm/gen/5x16-avx-broadcast.c
src/f32-rmax/avx.c
src/f32-vscale/avx-unroll32.c)
@@ -796,18 +808,30 @@
src/f32-gemm/gen/6x8-fma3-broadcast.c
src/f32-gemm/gen/7x8-fma3-broadcast.c
src/f32-gemm/gen/8x8-fma3-broadcast.c
+ src/f32-gemm/gen/1x16-fma3-broadcast.c
+ src/f32-gemm/gen/3x16-fma3-broadcast.c
+ src/f32-gemm/gen/4x16-fma3-broadcast.c
+ src/f32-gemm/gen/5x16-fma3-broadcast.c
src/f32-gemm/gen-inc/1x8-fma3-broadcast.c
src/f32-gemm/gen-inc/4x8-fma3-broadcast.c
src/f32-gemm/gen-inc/5x8-fma3-broadcast.c
src/f32-gemm/gen-inc/6x8-fma3-broadcast.c
src/f32-gemm/gen-inc/7x8-fma3-broadcast.c
src/f32-gemm/gen-inc/8x8-fma3-broadcast.c
+ src/f32-gemm/gen-inc/1x16-fma3-broadcast.c
+ src/f32-gemm/gen-inc/3x16-fma3-broadcast.c
+ src/f32-gemm/gen-inc/4x16-fma3-broadcast.c
+ src/f32-gemm/gen-inc/5x16-fma3-broadcast.c
src/f32-igemm/gen/1x8-fma3-broadcast.c
src/f32-igemm/gen/4x8-fma3-broadcast.c
src/f32-igemm/gen/5x8-fma3-broadcast.c
src/f32-igemm/gen/6x8-fma3-broadcast.c
src/f32-igemm/gen/7x8-fma3-broadcast.c
- src/f32-igemm/gen/8x8-fma3-broadcast.c)
+ src/f32-igemm/gen/8x8-fma3-broadcast.c
+ src/f32-igemm/gen/1x16-fma3-broadcast.c
+ src/f32-igemm/gen/3x16-fma3-broadcast.c
+ src/f32-igemm/gen/4x16-fma3-broadcast.c
+ src/f32-igemm/gen/5x16-fma3-broadcast.c)
SET(XNNPACK_AVX2_MICROKERNEL_SRCS
src/f32-raddexpminusmax/avx2-p5-unroll64.c
diff --git a/bench/f32-gemm-e2e.cc b/bench/f32-gemm-e2e.cc
index d207283..5f97603 100644
--- a/bench/f32-gemm-e2e.cc
+++ b/bench/f32-gemm-e2e.cc
@@ -606,6 +606,36 @@
benchmark::utils::CheckAVX);
}
+ static void f32_gemm_3x16__avx_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
+ GEMMEnd2EndBenchmark(state, model,
+ xnn_f32_gemm_ukernel_3x16__avx_broadcast,
+ xnn_f32_igemm_ukernel_3x16__avx_broadcast,
+ xnn_f32_gemm_ukernel_1x16__avx_broadcast,
+ xnn_f32_igemm_ukernel_1x16__avx_broadcast,
+ 3 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
+ benchmark::utils::CheckAVX);
+ }
+
+ static void f32_gemm_4x16__avx_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
+ GEMMEnd2EndBenchmark(state, model,
+ xnn_f32_gemm_ukernel_4x16__avx_broadcast,
+ xnn_f32_igemm_ukernel_4x16__avx_broadcast,
+ xnn_f32_gemm_ukernel_1x16__avx_broadcast,
+ xnn_f32_igemm_ukernel_1x16__avx_broadcast,
+ 4 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
+ benchmark::utils::CheckAVX);
+ }
+
+ static void f32_gemm_5x16__avx_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
+ GEMMEnd2EndBenchmark(state, model,
+ xnn_f32_gemm_ukernel_5x16__avx_broadcast,
+ xnn_f32_igemm_ukernel_5x16__avx_broadcast,
+ xnn_f32_gemm_ukernel_1x16__avx_broadcast,
+ xnn_f32_igemm_ukernel_1x16__avx_broadcast,
+ 5 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
+ benchmark::utils::CheckAVX);
+ }
+
static void f32_gemm_4x8__fma3_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
xnn_f32_gemm_ukernel_4x8__fma3_broadcast,
@@ -656,6 +686,36 @@
benchmark::utils::CheckFMA3);
}
+ static void f32_gemm_3x16__fma3_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
+ GEMMEnd2EndBenchmark(state, model,
+ xnn_f32_gemm_ukernel_3x16__fma3_broadcast,
+ xnn_f32_igemm_ukernel_3x16__fma3_broadcast,
+ xnn_f32_gemm_ukernel_1x16__fma3_broadcast,
+ xnn_f32_igemm_ukernel_1x16__fma3_broadcast,
+ 3 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
+ benchmark::utils::CheckFMA3);
+ }
+
+ static void f32_gemm_4x16__fma3_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
+ GEMMEnd2EndBenchmark(state, model,
+ xnn_f32_gemm_ukernel_4x16__fma3_broadcast,
+ xnn_f32_igemm_ukernel_4x16__fma3_broadcast,
+ xnn_f32_gemm_ukernel_1x16__fma3_broadcast,
+ xnn_f32_igemm_ukernel_1x16__fma3_broadcast,
+ 4 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
+ benchmark::utils::CheckFMA3);
+ }
+
+ static void f32_gemm_5x16__fma3_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
+ GEMMEnd2EndBenchmark(state, model,
+ xnn_f32_gemm_ukernel_5x16__fma3_broadcast,
+ xnn_f32_igemm_ukernel_5x16__fma3_broadcast,
+ xnn_f32_gemm_ukernel_1x16__fma3_broadcast,
+ xnn_f32_igemm_ukernel_1x16__fma3_broadcast,
+ 5 /* mr */, 16 /* nr */, 0 /* log2_kr */, 0 /* log2_sr */,
+ benchmark::utils::CheckFMA3);
+ }
+
static void f32_gemm_4x16__avx512f_broadcast(benchmark::State& state, models::ExecutionPlanFactory model) {
GEMMEnd2EndBenchmark(state, model,
xnn_f32_gemm_ukernel_4x16__avx512f_broadcast,
@@ -727,6 +787,15 @@
BENCHMARK_CAPTURE(f32_gemm_7x8__avx_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
BENCHMARK_CAPTURE(f32_gemm_7x8__avx_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_gemm_3x16__avx_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_gemm_3x16__avx_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_gemm_4x16__avx_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_gemm_4x16__avx_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_gemm_5x16__avx_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_gemm_5x16__avx_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
+
BENCHMARK_CAPTURE(f32_gemm_4x8__fma3_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
BENCHMARK_CAPTURE(f32_gemm_4x8__fma3_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
@@ -742,6 +811,15 @@
BENCHMARK_CAPTURE(f32_gemm_8x8__fma3_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
BENCHMARK_CAPTURE(f32_gemm_8x8__fma3_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_gemm_3x16__fma3_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_gemm_3x16__fma3_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_gemm_4x16__fma3_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_gemm_4x16__fma3_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
+
+ BENCHMARK_CAPTURE(f32_gemm_5x16__fma3_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
+ BENCHMARK_CAPTURE(f32_gemm_5x16__fma3_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
+
BENCHMARK_CAPTURE(f32_gemm_4x16__avx512f_broadcast, mobilenet_v1, models::MobileNetV1)->Unit(benchmark::kMicrosecond)->UseRealTime();
BENCHMARK_CAPTURE(f32_gemm_4x16__avx512f_broadcast, mobilenet_v2, models::MobileNetV2)->Unit(benchmark::kMicrosecond)->UseRealTime();
diff --git a/scripts/generate-f32-gemm.sh b/scripts/generate-f32-gemm.sh
index 2fb1dd4..19ff388 100755
--- a/scripts/generate-f32-gemm.sh
+++ b/scripts/generate-f32-gemm.sh
@@ -216,6 +216,18 @@
tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=7 -D NR=8 -D FMA=0 -D INC=0 -o src/f32-gemm/gen/7x8-avx-broadcast.c
tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=7 -D NR=8 -D FMA=0 -D INC=1 -o src/f32-gemm/gen-inc/7x8-avx-broadcast.c
+
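+# NR=16 variants: INC=0 emits the regular GEMM micro-kernel, INC=1 the GEMMINC variant that starts from a caller-provided accumulator buffer.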
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=1 -D NR=16 -D FMA=0 -D INC=0 -o src/f32-gemm/gen/1x16-avx-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=1 -D NR=16 -D FMA=0 -D INC=1 -o src/f32-gemm/gen-inc/1x16-avx-broadcast.c
+
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=3 -D NR=16 -D FMA=0 -D INC=0 -o src/f32-gemm/gen/3x16-avx-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=3 -D NR=16 -D FMA=0 -D INC=1 -o src/f32-gemm/gen-inc/3x16-avx-broadcast.c
+
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=4 -D NR=16 -D FMA=0 -D INC=0 -o src/f32-gemm/gen/4x16-avx-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=4 -D NR=16 -D FMA=0 -D INC=1 -o src/f32-gemm/gen-inc/4x16-avx-broadcast.c
+
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=5 -D NR=16 -D FMA=0 -D INC=0 -o src/f32-gemm/gen/5x16-avx-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=5 -D NR=16 -D FMA=0 -D INC=1 -o src/f32-gemm/gen-inc/5x16-avx-broadcast.c
### FMA3+BROADCAST micro-kernels
tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=1 -D NR=8 -D FMA=3 -D INC=0 -o src/f32-gemm/gen/1x8-fma3-broadcast.c
tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=1 -D NR=8 -D FMA=3 -D INC=1 -o src/f32-gemm/gen-inc/1x8-fma3-broadcast.c
@@ -235,6 +247,18 @@
tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=8 -D NR=8 -D FMA=3 -D INC=0 -o src/f32-gemm/gen/8x8-fma3-broadcast.c
tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=8 -D NR=8 -D FMA=3 -D INC=1 -o src/f32-gemm/gen-inc/8x8-fma3-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=1 -D NR=16 -D FMA=3 -D INC=0 -o src/f32-gemm/gen/1x16-fma3-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=1 -D NR=16 -D FMA=3 -D INC=1 -o src/f32-gemm/gen-inc/1x16-fma3-broadcast.c
+
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=3 -D NR=16 -D FMA=3 -D INC=0 -o src/f32-gemm/gen/3x16-fma3-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=3 -D NR=16 -D FMA=3 -D INC=1 -o src/f32-gemm/gen-inc/3x16-fma3-broadcast.c
+
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=4 -D NR=16 -D FMA=3 -D INC=0 -o src/f32-gemm/gen/4x16-fma3-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=4 -D NR=16 -D FMA=3 -D INC=1 -o src/f32-gemm/gen-inc/4x16-fma3-broadcast.c
+
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=5 -D NR=16 -D FMA=3 -D INC=0 -o src/f32-gemm/gen/5x16-fma3-broadcast.c
+tools/xngen src/f32-gemm/avx-broadcast.c.in -D MR=5 -D NR=16 -D FMA=3 -D INC=1 -o src/f32-gemm/gen-inc/5x16-fma3-broadcast.c
+
################################# x86 AVX-512 #################################
### AVX512F+BROADCAST micro-kernels
tools/xngen src/f32-gemm/avx512-broadcast.c.in -D MR=1 -D NR=16 -D INC=0 -o src/f32-gemm/gen/1x16-avx512f-broadcast.c
diff --git a/scripts/generate-f32-igemm.sh b/scripts/generate-f32-igemm.sh
index d596a4f..fd5da68 100755
--- a/scripts/generate-f32-igemm.sh
+++ b/scripts/generate-f32-igemm.sh
@@ -93,6 +93,10 @@
tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=5 -D NR=8 -D FMA=0 -o src/f32-igemm/gen/5x8-avx-broadcast.c
tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=6 -D NR=8 -D FMA=0 -o src/f32-igemm/gen/6x8-avx-broadcast.c
tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=7 -D NR=8 -D FMA=0 -o src/f32-igemm/gen/7x8-avx-broadcast.c
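+# NR=16 variants of the AVX IGEMM micro-kernels (the IGEMM generator takes no INC flag; there is no accumulator-resuming IGEMM variant).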
+tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=1 -D NR=16 -D FMA=0 -o src/f32-igemm/gen/1x16-avx-broadcast.c
+tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=3 -D NR=16 -D FMA=0 -o src/f32-igemm/gen/3x16-avx-broadcast.c
+tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=4 -D NR=16 -D FMA=0 -o src/f32-igemm/gen/4x16-avx-broadcast.c
+tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=5 -D NR=16 -D FMA=0 -o src/f32-igemm/gen/5x16-avx-broadcast.c
### FMA3+BROADCAST micro-kernels
tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=1 -D NR=8 -D FMA=3 -o src/f32-igemm/gen/1x8-fma3-broadcast.c
tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=4 -D NR=8 -D FMA=3 -o src/f32-igemm/gen/4x8-fma3-broadcast.c
@@ -100,6 +104,10 @@
tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=6 -D NR=8 -D FMA=3 -o src/f32-igemm/gen/6x8-fma3-broadcast.c
tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=7 -D NR=8 -D FMA=3 -o src/f32-igemm/gen/7x8-fma3-broadcast.c
tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=8 -D NR=8 -D FMA=3 -o src/f32-igemm/gen/8x8-fma3-broadcast.c
+tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=1 -D NR=16 -D FMA=3 -o src/f32-igemm/gen/1x16-fma3-broadcast.c
+tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=3 -D NR=16 -D FMA=3 -o src/f32-igemm/gen/3x16-fma3-broadcast.c
+tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=4 -D NR=16 -D FMA=3 -o src/f32-igemm/gen/4x16-fma3-broadcast.c
+tools/xngen src/f32-igemm/avx-broadcast.c.in -D MR=5 -D NR=16 -D FMA=3 -o src/f32-igemm/gen/5x16-fma3-broadcast.c
################################# x86 AVX-512 #################################
### AVX512F+BROADCAST micro-kernels
diff --git a/src/f32-gemm/gen-inc/1x16-avx-broadcast.c b/src/f32-gemm/gen-inc/1x16-avx-broadcast.c
new file mode 100644
index 0000000..3554a5e
--- /dev/null
+++ b/src/f32-gemm/gen-inc/1x16-avx-broadcast.c
@@ -0,0 +1,109 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_1x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const float*restrict acc,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 1);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+ assert(acc != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
+ acc += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
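+      // AVX has no fused multiply-add: multiply the broadcast A element by each 8-wide B vector, then add into the row accumulators.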
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+
+ k -= sizeof(float);
+ } while (k != 0);
+
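+    // Clamp the accumulators to the output range from params; each bound is stored as four replicated floats and broadcast across both 128-bit lanes.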
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
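+      // nc < 16: peel 8, 4, 2, and 1 columns off the tile according to the bits of nc.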
+ if (nc & 8) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c0 += 8;
+ }
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/1x16-fma3-broadcast.c b/src/f32-gemm/gen-inc/1x16-fma3-broadcast.c
new file mode 100644
index 0000000..4a2083a
--- /dev/null
+++ b/src/f32-gemm/gen-inc/1x16-fma3-broadcast.c
@@ -0,0 +1,109 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_1x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const float*restrict acc,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 1);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+ assert(acc != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
+ acc += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
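+      // FMA3: a single fused multiply-add per accumulator replaces the separate mul+add of the AVX kernel.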
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c0 += 8;
+ }
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/3x16-avx-broadcast.c b/src/f32-gemm/gen-inc/3x16-avx-broadcast.c
new file mode 100644
index 0000000..cd22a7f
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x16-avx-broadcast.c
@@ -0,0 +1,171 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_3x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const float*restrict acc,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 3);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+ assert(acc != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
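+  // When mr < 3, the unused row pointers alias the previous row so that loads and stores stay in bounds.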
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
+ __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
+ __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
+ __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
+ __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
+ acc += 48;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/3x16-fma3-broadcast.c b/src/f32-gemm/gen-inc/3x16-fma3-broadcast.c
new file mode 100644
index 0000000..5e3f481
--- /dev/null
+++ b/src/f32-gemm/gen-inc/3x16-fma3-broadcast.c
@@ -0,0 +1,171 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_3x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const float*restrict acc,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 3);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+ assert(acc != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
+ __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
+ __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
+ __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
+ __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
+ acc += 48;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/4x16-avx-broadcast.c b/src/f32-gemm/gen-inc/4x16-avx-broadcast.c
new file mode 100644
index 0000000..0605dbd
--- /dev/null
+++ b/src/f32-gemm/gen-inc/4x16-avx-broadcast.c
@@ -0,0 +1,202 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_4x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const float*restrict acc,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 4);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+ assert(acc != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+ const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr != 4) {
+ a3 = a2;
+ c3 = c2;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
+ __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
+ __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
+ __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
+ __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
+ __m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
+ __m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
+ acc += 64;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+ vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a3 = (const float*) ((uintptr_t) a3 - kc);
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/4x16-fma3-broadcast.c b/src/f32-gemm/gen-inc/4x16-fma3-broadcast.c
new file mode 100644
index 0000000..fc9e461
--- /dev/null
+++ b/src/f32-gemm/gen-inc/4x16-fma3-broadcast.c
@@ -0,0 +1,202 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_4x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const float*restrict acc,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 4);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+ assert(acc != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+ const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr != 4) {
+ a3 = a2;
+ c3 = c2;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
+ __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
+ __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
+ __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
+ __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
+ __m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
+ __m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
+ acc += 64;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+ vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a3 = (const float*) ((uintptr_t) a3 - kc);
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x16-avx-broadcast.c b/src/f32-gemm/gen-inc/5x16-avx-broadcast.c
new file mode 100644
index 0000000..95a9273
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x16-avx-broadcast.c
@@ -0,0 +1,233 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_5x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const float*restrict acc,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 5);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+ assert(acc != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+ const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 4) {
+ a3 = a2;
+ c3 = c2;
+ }
+ const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+ float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 4) {
+ a4 = a3;
+ c4 = c3;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
+ __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
+ __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
+ __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
+ __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
+ __m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
+ __m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
+ __m256 vacc4x01234567 = _mm256_load_ps(acc + 64);
+ __m256 vacc4x89ABCDEF = _mm256_load_ps(acc + 72);
+ acc += 80;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+ const __m256 va4 = _mm256_broadcast_ss(a4);
+ a4 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
+ vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+ vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
+ vacc4x89ABCDEF = _mm256_add_ps(vacc4x89ABCDEF, _mm256_mul_ps(va4, vb89ABCDEF));
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+ vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+ vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
+ c4 = (float*) ((uintptr_t) c4 + cn_stride);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a4 = (const float*) ((uintptr_t) a4 - kc);
+ a3 = (const float*) ((uintptr_t) a3 - kc);
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc4x01234567 = vacc4x89ABCDEF;
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c4 += 8;
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c4, vacc4x0123);
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c4 += 4;
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c4, vacc4x0123);
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c4 += 2;
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c4, vacc4x0123);
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen-inc/5x16-fma3-broadcast.c b/src/f32-gemm/gen-inc/5x16-fma3-broadcast.c
new file mode 100644
index 0000000..d35ac82
--- /dev/null
+++ b/src/f32-gemm/gen-inc/5x16-fma3-broadcast.c
@@ -0,0 +1,233 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemminc_ukernel_5x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const float*restrict acc,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 5);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+ assert(acc != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+ const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 4) {
+ a3 = a2;
+ c3 = c2;
+ }
+ const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+ float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 4) {
+ a4 = a3;
+ c4 = c3;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(acc + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(acc + 8);
+ __m256 vacc1x01234567 = _mm256_load_ps(acc + 16);
+ __m256 vacc1x89ABCDEF = _mm256_load_ps(acc + 24);
+ __m256 vacc2x01234567 = _mm256_load_ps(acc + 32);
+ __m256 vacc2x89ABCDEF = _mm256_load_ps(acc + 40);
+ __m256 vacc3x01234567 = _mm256_load_ps(acc + 48);
+ __m256 vacc3x89ABCDEF = _mm256_load_ps(acc + 56);
+ __m256 vacc4x01234567 = _mm256_load_ps(acc + 64);
+ __m256 vacc4x89ABCDEF = _mm256_load_ps(acc + 72);
+ acc += 80;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+ const __m256 va4 = _mm256_broadcast_ss(a4);
+ a4 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
+ vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+ vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
+ vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+ vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+ vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
+ c4 = (float*) ((uintptr_t) c4 + cn_stride);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a4 = (const float*) ((uintptr_t) a4 - kc);
+ a3 = (const float*) ((uintptr_t) a3 - kc);
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc4x01234567 = vacc4x89ABCDEF;
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c4 += 8;
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c4, vacc4x0123);
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c4 += 4;
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c4, vacc4x0123);
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c4 += 2;
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c4, vacc4x0123);
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
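
Note on the gen-inc variants: the gemminc kernel above differs from its plain gemm twin in one place only — the accumulators are seeded from a caller-provided acc buffer (5 rows x 16 columns = 80 floats per block, hence acc += 80) instead of from bias values packed at the head of w. That lets the K dimension be split across several passes that accumulate into the same output tile. A scalar model of that contract, for illustration only (the row-major acc layout is read directly off the loads above; this is not the production kernel):

#include <stddef.h>

/* Scalar sketch of the 5x16 gemminc semantics (no clamping, full tile
 * only): acc[m*16 + n] carries the partial sum for row m, column n, in
 * exactly the order the vector kernel loads it. */
static void gemminc_5x16_scalar(
    size_t kc,              /* bytes of K; kc % sizeof(float) == 0 */
    const float* a,         /* 5 input rows, a_stride bytes apart */
    size_t a_stride,
    const float* w,         /* kc/4 panels of 16 packed B values */
    const float* acc,       /* 80 partial sums from an earlier pass */
    float out[5][16])
{
  const size_t k_elems = kc / sizeof(float);
  for (size_t m = 0; m < 5; m++) {
    const float* am = (const float*) ((const char*) a + m * a_stride);
    for (size_t n = 0; n < 16; n++) {
      float sum = acc[m * 16 + n];  /* the gemm variant reads bias from w here */
      for (size_t k = 0; k < k_elems; k++) {
        sum += am[k] * w[k * 16 + n];
      }
      out[m][n] = sum;
    }
  }
}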
diff --git a/src/f32-gemm/gen/1x16-avx-broadcast.c b/src/f32-gemm/gen/1x16-avx-broadcast.c
new file mode 100644
index 0000000..8dcbdd2
--- /dev/null
+++ b/src/f32-gemm/gen/1x16-avx-broadcast.c
@@ -0,0 +1,107 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_1x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 1);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c0 += 8;
+ }
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
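
The else branch above handles nc < 16 by peeling stores off the binary decomposition of nc — an 8-wide store, then 4, 2, and 1 — shifting the surviving lanes down between steps with _mm256_extractf128_ps and _mm_movehl_ps. A scalar model of the same bookkeeping (illustration only):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Writes exactly nc (< 16) floats of v to c -- the scalar analogue of
 * the nc & 8 / & 4 / & 2 / & 1 cascade in the kernel above. */
static void store_tail_16(float* c, const float* v, size_t nc) {
  assert(nc < 16);
  if (nc & 8) { memcpy(c, v, 8 * sizeof(float)); c += 8; v += 8; }
  if (nc & 4) { memcpy(c, v, 4 * sizeof(float)); c += 4; v += 4; }
  if (nc & 2) { memcpy(c, v, 2 * sizeof(float)); c += 2; v += 2; }
  if (nc & 1) { *c = *v; }
}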
diff --git a/src/f32-gemm/gen/1x16-fma3-broadcast.c b/src/f32-gemm/gen/1x16-fma3-broadcast.c
new file mode 100644
index 0000000..a8c9923
--- /dev/null
+++ b/src/f32-gemm/gen/1x16-fma3-broadcast.c
@@ -0,0 +1,107 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_1x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 1);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c0 += 8;
+ }
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
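
Diffed against its AVX twin above, this fma3 file changes only the inner update: the separate multiply and add become a single _mm256_fmadd_ps, which also rounds once instead of twice. Side by side (a minimal sketch; the FMA3 path assumes compilation with FMA enabled, e.g. -mfma):

#include <immintrin.h>

static inline __m256 madd_avx(__m256 va, __m256 vb, __m256 vacc) {
  /* separate multiply and add: two instructions, two roundings */
  return _mm256_add_ps(vacc, _mm256_mul_ps(va, vb));
}

#ifdef __FMA__
static inline __m256 madd_fma3(__m256 va, __m256 vb, __m256 vacc) {
  /* fused multiply-add: one instruction, single rounding of va*vb */
  return _mm256_fmadd_ps(va, vb, vacc);
}
#endif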
diff --git a/src/f32-gemm/gen/3x16-avx-broadcast.c b/src/f32-gemm/gen/3x16-avx-broadcast.c
new file mode 100644
index 0000000..5d32c31
--- /dev/null
+++ b/src/f32-gemm/gen/3x16-avx-broadcast.c
@@ -0,0 +1,169 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_3x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 3);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
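
All of these kernels clamp the finished tile against params->sse.min and params->sse.max, each a 4-float array replicated to all 8 lanes with _mm256_broadcast_ps. The same step in isolation (a sketch assuming that params layout):

#include <immintrin.h>

/* Clamp 8 lanes against min/max given as 4-float arrays, exactly as the
 * kernels broadcast params->sse.min / params->sse.max above. */
static inline __m256 clamp_output(__m256 v, const float min4[4],
                                  const float max4[4]) {
  const __m256 vmax = _mm256_broadcast_ps((const __m128*) max4);
  const __m256 vmin = _mm256_broadcast_ps((const __m128*) min4);
  return _mm256_max_ps(_mm256_min_ps(v, vmax), vmin);
}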
diff --git a/src/f32-gemm/gen/3x16-fma3-broadcast.c b/src/f32-gemm/gen/3x16-fma3-broadcast.c
new file mode 100644
index 0000000..ab8ed66
--- /dev/null
+++ b/src/f32-gemm/gen/3x16-fma3-broadcast.c
@@ -0,0 +1,169 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_3x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 3);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/4x16-avx-broadcast.c b/src/f32-gemm/gen/4x16-avx-broadcast.c
new file mode 100644
index 0000000..342dbe8
--- /dev/null
+++ b/src/f32-gemm/gen/4x16-avx-broadcast.c
@@ -0,0 +1,200 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_4x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 4);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+ const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr != 4) {
+ a3 = a2;
+ c3 = c2;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc3x01234567 = vacc0x01234567;
+ __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+ vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a3 = (const float*) ((uintptr_t) a3 - kc);
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
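
The prologue's chain of XNN_UNPREDICTABLE branches (mr < 2, mr <= 2, and mr != 4 for this 4-row kernel) all implement one rule: any row index at or past mr aliases its pointers onto the previous row, so short tiles compute redundant rows but never read or write out of bounds. An equivalent loop form (illustration only; the generator unrolls this and varies the comparison spelling per row):

#include <stddef.h>
#include <stdint.h>

/* Set up 4 row pointers, clamping rows >= mr onto row mr-1. */
static void setup_rows_4(size_t mr, const float* a_base, size_t a_stride,
                         float* c_base, size_t cm_stride,
                         const float* a[4], float* c[4]) {
  a[0] = a_base;
  c[0] = c_base;
  for (size_t m = 1; m < 4; m++) {
    a[m] = (const float*) ((uintptr_t) a[m - 1] + a_stride);
    c[m] = (float*) ((uintptr_t) c[m - 1] + cm_stride);
    if (m >= mr) {  /* same effect as the unrolled mr<2 / mr<=2 / mr!=4 tests */
      a[m] = a[m - 1];
      c[m] = c[m - 1];
    }
  }
}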
diff --git a/src/f32-gemm/gen/4x16-fma3-broadcast.c b/src/f32-gemm/gen/4x16-fma3-broadcast.c
new file mode 100644
index 0000000..e1ac9a6
--- /dev/null
+++ b/src/f32-gemm/gen/4x16-fma3-broadcast.c
@@ -0,0 +1,200 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_4x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 4);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+ const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr != 4) {
+ a3 = a2;
+ c3 = c2;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc3x01234567 = vacc0x01234567;
+ __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+ vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a3 = (const float*) ((uintptr_t) a3 - kc);
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
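
The load pattern in these gemm kernels implies the packed-weight layout: w starts with 16 bias values (consumed by the initial accumulator loads), then one 16-float panel of B per k step, 32-byte aligned so the aligned _mm256_load_ps is legal. A reference packer under that inferred layout (an assumption read off the loads above, not a packer shipped in this change):

#include <stddef.h>

/* Pack bias + B for one NR=16 column block: 16 bias floats, then k_elems
 * panels of 16 consecutive B values -- matching _mm256_load_ps(w)/(w+8)
 * with w += 16 per step in the kernels above. */
static void pack_w_16(size_t k_elems, const float* bias,
                      const float* b, size_t b_stride_elems, float* w) {
  for (size_t n = 0; n < 16; n++) {
    *w++ = (bias != NULL) ? bias[n] : 0.0f;
  }
  for (size_t k = 0; k < k_elems; k++) {
    for (size_t n = 0; n < 16; n++) {
      *w++ = b[k * b_stride_elems + n];
    }
  }
}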
diff --git a/src/f32-gemm/gen/5x16-avx-broadcast.c b/src/f32-gemm/gen/5x16-avx-broadcast.c
new file mode 100644
index 0000000..deb33bf
--- /dev/null
+++ b/src/f32-gemm/gen/5x16-avx-broadcast.c
@@ -0,0 +1,231 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_5x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 5);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+ const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 4) {
+ a3 = a2;
+ c3 = c2;
+ }
+ const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+ float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 4) {
+ a4 = a3;
+ c4 = c3;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc3x01234567 = vacc0x01234567;
+ __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc4x01234567 = vacc0x01234567;
+ __m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+ const __m256 va4 = _mm256_broadcast_ss(a4);
+ a4 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
+ vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+ vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
+ vacc4x89ABCDEF = _mm256_add_ps(vacc4x89ABCDEF, _mm256_mul_ps(va4, vb89ABCDEF));
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+ vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+ vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
+ c4 = (float*) ((uintptr_t) c4 + cn_stride);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a4 = (const float*) ((uintptr_t) a4 - kc);
+ a3 = (const float*) ((uintptr_t) a3 - kc);
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc4x01234567 = vacc4x89ABCDEF;
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c4 += 8;
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c4, vacc4x0123);
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c4 += 4;
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c4, vacc4x0123);
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c4 += 2;
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c4, vacc4x0123);
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-gemm/gen/5x16-fma3-broadcast.c b/src/f32-gemm/gen/5x16-fma3-broadcast.c
new file mode 100644
index 0000000..3e1d460
--- /dev/null
+++ b/src/f32-gemm/gen/5x16-fma3-broadcast.c
@@ -0,0 +1,231 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-gemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/gemm.h>
+
+
+void xnn_f32_gemm_ukernel_5x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ const float*restrict a,
+ size_t a_stride,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 5);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ const float* a0 = a;
+ float* c0 = c;
+ const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ a1 = a0;
+ c1 = c0;
+ }
+ const float* a2 = (const float*) ((uintptr_t) a1 + a_stride);
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ a2 = a1;
+ c2 = c1;
+ }
+ const float* a3 = (const float*) ((uintptr_t) a2 + a_stride);
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 4) {
+ a3 = a2;
+ c3 = c2;
+ }
+ const float* a4 = (const float*) ((uintptr_t) a3 + a_stride);
+ float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 4) {
+ a4 = a3;
+ c4 = c3;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w + 0);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc3x01234567 = vacc0x01234567;
+ __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc4x01234567 = vacc0x01234567;
+ __m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t k = kc;
+ do {
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+ const __m256 va4 = _mm256_broadcast_ss(a4);
+ a4 += 1;
+
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
+ vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+ vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
+ vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
+
+ k -= sizeof(float);
+ } while (k != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+ vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+ vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
+ c4 = (float*) ((uintptr_t) c4 + cn_stride);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a4 = (const float*) ((uintptr_t) a4 - kc);
+ a3 = (const float*) ((uintptr_t) a3 - kc);
+ a2 = (const float*) ((uintptr_t) a2 - kc);
+ a1 = (const float*) ((uintptr_t) a1 - kc);
+ a0 = (const float*) ((uintptr_t) a0 - kc);
+
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc4x01234567 = vacc4x89ABCDEF;
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c4 += 8;
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c4, vacc4x0123);
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c4 += 4;
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c4, vacc4x0123);
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c4 += 2;
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c4, vacc4x0123);
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
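
Between 16-column blocks the gemm kernels rewind each A row pointer by kc bytes (it was advanced one float per k step) and advance each C row pointer by cn_stride, so the same A rows are replayed against the next packed block of w. That bookkeeping in isolation:

#include <stddef.h>
#include <stdint.h>

/* Per-row pointer update after a full 16-column block is stored. */
static void next_block(const float** a_row, float** c_row,
                       size_t kc, size_t cn_stride) {
  *a_row = (const float*) ((uintptr_t) *a_row - kc);   /* replay same row */
  *c_row = (float*) ((uintptr_t) *c_row + cn_stride);  /* next 16 columns */
}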
diff --git a/src/f32-igemm/avx-broadcast.c.in b/src/f32-igemm/avx-broadcast.c.in
index 72215d5..28f279a 100644
--- a/src/f32-igemm/avx-broadcast.c.in
+++ b/src/f32-igemm/avx-broadcast.c.in
@@ -121,13 +121,13 @@
if (nc & ${1 << LOG2N}) {
$if LOG2N >= 3:
$for M in reversed(range(MR)):
- _mm_storeu_ps(c${M}, vacc${M}x${ABC[0:4]});
- $for N in range(4, 1 << LOG2N, 4):
- _mm_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
+ _mm256_storeu_ps(c${M}, vacc${M}x${ABC[0:8]});
+ $for N in range(8, 1 << LOG2N, 8):
+ _mm256_storeu_ps(c${M} + ${N}, vacc${M}x${ABC[N:N+8]});
$for M in reversed(range(MR)):
- $for N in range(0, 1 << (LOG2N - 1), 4):
- vacc${M}x${ABC[N:N+4]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+4]};
+ $for N in range(0, 1 << (LOG2N - 1), 8):
+ vacc${M}x${ABC[N:N+8]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+8]};
$for M in reversed(range(MR)):
c${M} += ${1 << LOG2N};
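
This hunk is the one hand-written change in the section: the igemm template's LOG2N >= 3 remainder branch previously addressed accumulators in 4-lane slices with 128-bit stores, names that only exist for the narrower kernels; for the new 16-wide kernels the nc & 8 branch must store a full __m256 and then shift the upper 8 lanes down in 8-lane steps. What the fixed template emits for an MR=1, NR=16 kernel, wrapped as a compilable helper (illustrative framing only — compare the generated 1x16 kernels below):

#include <immintrin.h>
#include <stddef.h>

/* The nc & 8 branch generated by the fixed template: one 256-bit store,
 * then lanes 8..15 become the new low half for the narrower stores that
 * follow. Returns the advanced output pointer. */
static float* store_nc8(float* c0, __m256* vlo, __m256 vhi, size_t nc) {
  if (nc & 8) {
    _mm256_storeu_ps(c0, *vlo);
    *vlo = vhi;
    c0 += 8;
  }
  return c0;
}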
diff --git a/src/f32-igemm/gen/1x16-avx-broadcast.c b/src/f32-igemm/gen/1x16-avx-broadcast.c
new file mode 100644
index 0000000..86a8ee1
--- /dev/null
+++ b/src/f32-igemm/gen/1x16-avx-broadcast.c
@@ -0,0 +1,120 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-igemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_1x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 1);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (1 * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ float* c0 = c;
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ size_t p = ks;
+ do {
+ const float* restrict a0 = a[0];
+ assert(a0 != NULL);
+ if XNN_UNPREDICTABLE(a0 != zero) {
+ a0 = (const float*) ((uintptr_t) a0 + a_offset);
+ }
+ a += 1;
+
+ size_t k = kc;
+ do {
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ k -= sizeof(float);
+ } while (k != 0);
+ p -= 1 * sizeof(void*);
+ } while (p != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c0 += 8;
+ }
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
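
The igemm variants replace gemm's strided A with an indirection buffer: a is an array of row pointers (ks bytes of entries per output position), entries equal to the shared zero row skip the a_offset rebasing so padding taps read zeros, and after a full block the array pointer itself rewinds by ks. A scalar model of that walk for a single output value (illustration only):

#include <stddef.h>
#include <stdint.h>

/* One output value of a 1-row igemm, scalar: iterate ks/sizeof(void*)
 * indirection entries, rebasing each non-zero pointer by a_offset, and
 * accumulate kc/4 products per entry against one packed column of w. */
static float igemm_1x1_scalar(size_t kc, size_t ks, const float** a,
                              size_t a_offset, const float* zero,
                              const float* w_col) {
  float sum = 0.0f;
  const size_t k_elems = kc / sizeof(float);
  for (size_t p = 0; p < ks / sizeof(void*); p++) {
    const float* a0 = a[p];
    if (a0 != zero) {
      a0 = (const float*) ((uintptr_t) a0 + a_offset);
    }
    for (size_t k = 0; k < k_elems; k++) {
      sum += a0[k] * w_col[p * k_elems + k];
    }
  }
  return sum;
}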
diff --git a/src/f32-igemm/gen/1x16-fma3-broadcast.c b/src/f32-igemm/gen/1x16-fma3-broadcast.c
new file mode 100644
index 0000000..faa086b
--- /dev/null
+++ b/src/f32-igemm/gen/1x16-fma3-broadcast.c
@@ -0,0 +1,120 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-igemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_1x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 1);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (1 * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ float* c0 = c;
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ size_t p = ks;
+ do {
+ const float* restrict a0 = a[0];
+ assert(a0 != NULL);
+ if XNN_UNPREDICTABLE(a0 != zero) {
+ a0 = (const float*) ((uintptr_t) a0 + a_offset);
+ }
+ a += 1;
+
+ size_t k = kc;
+ do {
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ k -= sizeof(float);
+ } while (k != 0);
+ p -= 1 * sizeof(void*);
+ } while (p != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c0 += 8;
+ }
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/3x16-avx-broadcast.c b/src/f32-igemm/gen/3x16-avx-broadcast.c
new file mode 100644
index 0000000..5cda186
--- /dev/null
+++ b/src/f32-igemm/gen/3x16-avx-broadcast.c
@@ -0,0 +1,186 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-igemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_3x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 3);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (3 * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ float* c0 = c;
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ c1 = c0;
+ }
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ c2 = c1;
+ }
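+ // Rows past mr alias the previous row's pointer; stores later run from row 2
+ // down to row 0, so the lowest valid row's data always lands last.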
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t p = ks;
+ do {
+ const float* restrict a0 = a[0];
+ assert(a0 != NULL);
+ if XNN_UNPREDICTABLE(a0 != zero) {
+ a0 = (const float*) ((uintptr_t) a0 + a_offset);
+ }
+ const float* restrict a1 = a[1];
+ assert(a1 != NULL);
+ if XNN_UNPREDICTABLE(a1 != zero) {
+ a1 = (const float*) ((uintptr_t) a1 + a_offset);
+ }
+ const float* restrict a2 = a[2];
+ assert(a2 != NULL);
+ if XNN_UNPREDICTABLE(a2 != zero) {
+ a2 = (const float*) ((uintptr_t) a2 + a_offset);
+ }
+ a += 3;
+
+ size_t k = kc;
+ do {
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+
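+ // AVX has no packed FMA, so each accumulation is a separate multiply and add.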
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+ k -= sizeof(float);
+ } while (k != 0);
+ p -= 3 * sizeof(void*);
+ } while (p != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/3x16-fma3-broadcast.c b/src/f32-igemm/gen/3x16-fma3-broadcast.c
new file mode 100644
index 0000000..3e16f46
--- /dev/null
+++ b/src/f32-igemm/gen/3x16-fma3-broadcast.c
@@ -0,0 +1,186 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-igemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_3x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 3);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (3 * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ float* c0 = c;
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ c1 = c0;
+ }
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ c2 = c1;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t p = ks;
+ do {
+ const float* restrict a0 = a[0];
+ assert(a0 != NULL);
+ if XNN_UNPREDICTABLE(a0 != zero) {
+ a0 = (const float*) ((uintptr_t) a0 + a_offset);
+ }
+ const float* restrict a1 = a[1];
+ assert(a1 != NULL);
+ if XNN_UNPREDICTABLE(a1 != zero) {
+ a1 = (const float*) ((uintptr_t) a1 + a_offset);
+ }
+ const float* restrict a2 = a[2];
+ assert(a2 != NULL);
+ if XNN_UNPREDICTABLE(a2 != zero) {
+ a2 = (const float*) ((uintptr_t) a2 + a_offset);
+ }
+ a += 3;
+
+ size_t k = kc;
+ do {
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+ k -= sizeof(float);
+ } while (k != 0);
+ p -= 3 * sizeof(void*);
+ } while (p != 0);
+
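+ // params->sse.max/min each hold four copies of the bound; broadcast_ps
+ // replicates them across both 128-bit halves of the YMM register.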
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/4x16-avx-broadcast.c b/src/f32-igemm/gen/4x16-avx-broadcast.c
new file mode 100644
index 0000000..a9e4739
--- /dev/null
+++ b/src/f32-igemm/gen/4x16-avx-broadcast.c
@@ -0,0 +1,219 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-igemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_4x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 4);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (4 * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ float* c0 = c;
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ c1 = c0;
+ }
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ c2 = c1;
+ }
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr != 4) {
+ c3 = c2;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc3x01234567 = vacc0x01234567;
+ __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t p = ks;
+ do {
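+ // Gather 4 row pointers from the indirection buffer; entries equal to the
+ // shared zero vector are used as-is and skip the a_offset rebase.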
+ const float* restrict a0 = a[0];
+ assert(a0 != NULL);
+ if XNN_UNPREDICTABLE(a0 != zero) {
+ a0 = (const float*) ((uintptr_t) a0 + a_offset);
+ }
+ const float* restrict a1 = a[1];
+ assert(a1 != NULL);
+ if XNN_UNPREDICTABLE(a1 != zero) {
+ a1 = (const float*) ((uintptr_t) a1 + a_offset);
+ }
+ const float* restrict a2 = a[2];
+ assert(a2 != NULL);
+ if XNN_UNPREDICTABLE(a2 != zero) {
+ a2 = (const float*) ((uintptr_t) a2 + a_offset);
+ }
+ const float* restrict a3 = a[3];
+ assert(a3 != NULL);
+ if XNN_UNPREDICTABLE(a3 != zero) {
+ a3 = (const float*) ((uintptr_t) a3 + a_offset);
+ }
+ a += 4;
+
+ size_t k = kc;
+ do {
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+ vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
+ vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
+ k -= sizeof(float);
+ } while (k != 0);
+ p -= 4 * sizeof(void*);
+ } while (p != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
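+ // Rewind the indirection buffer by ks bytes so the same row pointers are
+ // replayed for the next group of 16 output columns.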
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/4x16-fma3-broadcast.c b/src/f32-igemm/gen/4x16-fma3-broadcast.c
new file mode 100644
index 0000000..874f1bc
--- /dev/null
+++ b/src/f32-igemm/gen/4x16-fma3-broadcast.c
@@ -0,0 +1,219 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-igemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_4x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 4);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (4 * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ float* c0 = c;
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ c1 = c0;
+ }
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ c2 = c1;
+ }
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr != 4) {
+ c3 = c2;
+ }
+
+ do {
+ __m256 vacc0x01234567 = _mm256_load_ps(w);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc3x01234567 = vacc0x01234567;
+ __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t p = ks;
+ do {
+ const float* restrict a0 = a[0];
+ assert(a0 != NULL);
+ if XNN_UNPREDICTABLE(a0 != zero) {
+ a0 = (const float*) ((uintptr_t) a0 + a_offset);
+ }
+ const float* restrict a1 = a[1];
+ assert(a1 != NULL);
+ if XNN_UNPREDICTABLE(a1 != zero) {
+ a1 = (const float*) ((uintptr_t) a1 + a_offset);
+ }
+ const float* restrict a2 = a[2];
+ assert(a2 != NULL);
+ if XNN_UNPREDICTABLE(a2 != zero) {
+ a2 = (const float*) ((uintptr_t) a2 + a_offset);
+ }
+ const float* restrict a3 = a[3];
+ assert(a3 != NULL);
+ if XNN_UNPREDICTABLE(a3 != zero) {
+ a3 = (const float*) ((uintptr_t) a3 + a_offset);
+ }
+ a += 4;
+
+ size_t k = kc;
+ do {
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+ vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
+ vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
+ k -= sizeof(float);
+ } while (k != 0);
+ p -= 4 * sizeof(void*);
+ } while (p != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= 16;
+ } else {
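+ // nc < 16 here, so its low four bits (8/4/2/1) exactly describe the columns
+ // still to be stored.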
+ if (nc & 8) {
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x16-avx-broadcast.c b/src/f32-igemm/gen/5x16-avx-broadcast.c
new file mode 100644
index 0000000..4336c8e
--- /dev/null
+++ b/src/f32-igemm/gen/5x16-avx-broadcast.c
@@ -0,0 +1,252 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-igemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_5x16__avx_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 5);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (5 * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ float* c0 = c;
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ c1 = c0;
+ }
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ c2 = c1;
+ }
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 4) {
+ c3 = c2;
+ }
+ float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 4) {
+ c4 = c3;
+ }
+
+ do {
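+ // All 10 accumulators (5 rows x 2 column vectors) start from the 16 packed
+ // bias values at the head of the weight stream.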
+ __m256 vacc0x01234567 = _mm256_load_ps(w);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc3x01234567 = vacc0x01234567;
+ __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc4x01234567 = vacc0x01234567;
+ __m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t p = ks;
+ do {
+ const float* restrict a0 = a[0];
+ assert(a0 != NULL);
+ if XNN_UNPREDICTABLE(a0 != zero) {
+ a0 = (const float*) ((uintptr_t) a0 + a_offset);
+ }
+ const float* restrict a1 = a[1];
+ assert(a1 != NULL);
+ if XNN_UNPREDICTABLE(a1 != zero) {
+ a1 = (const float*) ((uintptr_t) a1 + a_offset);
+ }
+ const float* restrict a2 = a[2];
+ assert(a2 != NULL);
+ if XNN_UNPREDICTABLE(a2 != zero) {
+ a2 = (const float*) ((uintptr_t) a2 + a_offset);
+ }
+ const float* restrict a3 = a[3];
+ assert(a3 != NULL);
+ if XNN_UNPREDICTABLE(a3 != zero) {
+ a3 = (const float*) ((uintptr_t) a3 + a_offset);
+ }
+ const float* restrict a4 = a[4];
+ assert(a4 != NULL);
+ if XNN_UNPREDICTABLE(a4 != zero) {
+ a4 = (const float*) ((uintptr_t) a4 + a_offset);
+ }
+ a += 5;
+
+ size_t k = kc;
+ do {
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+ const __m256 va4 = _mm256_broadcast_ss(a4);
+ a4 += 1;
+
+ vacc0x01234567 = _mm256_add_ps(vacc0x01234567, _mm256_mul_ps(va0, vb01234567));
+ vacc0x89ABCDEF = _mm256_add_ps(vacc0x89ABCDEF, _mm256_mul_ps(va0, vb89ABCDEF));
+ vacc1x01234567 = _mm256_add_ps(vacc1x01234567, _mm256_mul_ps(va1, vb01234567));
+ vacc1x89ABCDEF = _mm256_add_ps(vacc1x89ABCDEF, _mm256_mul_ps(va1, vb89ABCDEF));
+ vacc2x01234567 = _mm256_add_ps(vacc2x01234567, _mm256_mul_ps(va2, vb01234567));
+ vacc2x89ABCDEF = _mm256_add_ps(vacc2x89ABCDEF, _mm256_mul_ps(va2, vb89ABCDEF));
+ vacc3x01234567 = _mm256_add_ps(vacc3x01234567, _mm256_mul_ps(va3, vb01234567));
+ vacc3x89ABCDEF = _mm256_add_ps(vacc3x89ABCDEF, _mm256_mul_ps(va3, vb89ABCDEF));
+ vacc4x01234567 = _mm256_add_ps(vacc4x01234567, _mm256_mul_ps(va4, vb01234567));
+ vacc4x89ABCDEF = _mm256_add_ps(vacc4x89ABCDEF, _mm256_mul_ps(va4, vb89ABCDEF));
+ k -= sizeof(float);
+ } while (k != 0);
+ p -= 5 * sizeof(void*);
+ } while (p != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+ vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+ vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
+ c4 = (float*) ((uintptr_t) c4 + cn_stride);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc4x01234567 = vacc4x89ABCDEF;
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c4 += 8;
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c4, vacc4x0123);
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c4 += 4;
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c4, vacc4x0123);
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c4 += 2;
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c4, vacc4x0123);
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/f32-igemm/gen/5x16-fma3-broadcast.c b/src/f32-igemm/gen/5x16-fma3-broadcast.c
new file mode 100644
index 0000000..471f871
--- /dev/null
+++ b/src/f32-igemm/gen/5x16-fma3-broadcast.c
@@ -0,0 +1,252 @@
+// Auto-generated file. Do not edit!
+// Template: src/f32-igemm/avx-broadcast.c.in
+// Generator: tools/xngen
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <immintrin.h>
+
+#include <xnnpack/igemm.h>
+
+
+void xnn_f32_igemm_ukernel_5x16__fma3_broadcast(
+ size_t mr,
+ size_t nc,
+ size_t kc,
+ size_t ks,
+ const float**restrict a,
+ const float*restrict w,
+ float*restrict c,
+ size_t cm_stride,
+ size_t cn_stride,
+ size_t a_offset,
+ const float* zero,
+ const union xnn_f32_output_params params[restrict static 1])
+{
+ assert(mr != 0);
+ assert(mr <= 5);
+ assert(nc != 0);
+ assert(kc != 0);
+ assert(kc % sizeof(float) == 0);
+ assert(ks != 0);
+ assert(ks % (5 * sizeof(void*)) == 0);
+ assert(a_offset % sizeof(float) == 0);
+ assert(a != NULL);
+ assert(w != NULL);
+ assert(c != NULL);
+
+ float* c0 = c;
+ float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 2) {
+ c1 = c0;
+ }
+ float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 2) {
+ c2 = c1;
+ }
+ float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
+ if XNN_UNPREDICTABLE(mr < 4) {
+ c3 = c2;
+ }
+ float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
+ if XNN_UNPREDICTABLE(mr <= 4) {
+ c4 = c3;
+ }
+
+ do {
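+ // 10 of the 16 YMM registers hold accumulators; the rest cycle between the
+ // two weight vectors and the per-row broadcasts inside the K loop.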
+ __m256 vacc0x01234567 = _mm256_load_ps(w);
+ __m256 vacc0x89ABCDEF = _mm256_load_ps(w + 8);
+ __m256 vacc1x01234567 = vacc0x01234567;
+ __m256 vacc1x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc2x01234567 = vacc0x01234567;
+ __m256 vacc2x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc3x01234567 = vacc0x01234567;
+ __m256 vacc3x89ABCDEF = vacc0x89ABCDEF;
+ __m256 vacc4x01234567 = vacc0x01234567;
+ __m256 vacc4x89ABCDEF = vacc0x89ABCDEF;
+ w += 16;
+
+ size_t p = ks;
+ do {
+ const float* restrict a0 = a[0];
+ assert(a0 != NULL);
+ if XNN_UNPREDICTABLE(a0 != zero) {
+ a0 = (const float*) ((uintptr_t) a0 + a_offset);
+ }
+ const float* restrict a1 = a[1];
+ assert(a1 != NULL);
+ if XNN_UNPREDICTABLE(a1 != zero) {
+ a1 = (const float*) ((uintptr_t) a1 + a_offset);
+ }
+ const float* restrict a2 = a[2];
+ assert(a2 != NULL);
+ if XNN_UNPREDICTABLE(a2 != zero) {
+ a2 = (const float*) ((uintptr_t) a2 + a_offset);
+ }
+ const float* restrict a3 = a[3];
+ assert(a3 != NULL);
+ if XNN_UNPREDICTABLE(a3 != zero) {
+ a3 = (const float*) ((uintptr_t) a3 + a_offset);
+ }
+ const float* restrict a4 = a[4];
+ assert(a4 != NULL);
+ if XNN_UNPREDICTABLE(a4 != zero) {
+ a4 = (const float*) ((uintptr_t) a4 + a_offset);
+ }
+ a += 5;
+
+ size_t k = kc;
+ do {
+ const __m256 vb01234567 = _mm256_load_ps(w);
+ const __m256 vb89ABCDEF = _mm256_load_ps(w + 8);
+ w += 16;
+
+ const __m256 va0 = _mm256_broadcast_ss(a0);
+ a0 += 1;
+ const __m256 va1 = _mm256_broadcast_ss(a1);
+ a1 += 1;
+ const __m256 va2 = _mm256_broadcast_ss(a2);
+ a2 += 1;
+ const __m256 va3 = _mm256_broadcast_ss(a3);
+ a3 += 1;
+ const __m256 va4 = _mm256_broadcast_ss(a4);
+ a4 += 1;
+
+ vacc0x01234567 = _mm256_fmadd_ps(va0, vb01234567, vacc0x01234567);
+ vacc0x89ABCDEF = _mm256_fmadd_ps(va0, vb89ABCDEF, vacc0x89ABCDEF);
+ vacc1x01234567 = _mm256_fmadd_ps(va1, vb01234567, vacc1x01234567);
+ vacc1x89ABCDEF = _mm256_fmadd_ps(va1, vb89ABCDEF, vacc1x89ABCDEF);
+ vacc2x01234567 = _mm256_fmadd_ps(va2, vb01234567, vacc2x01234567);
+ vacc2x89ABCDEF = _mm256_fmadd_ps(va2, vb89ABCDEF, vacc2x89ABCDEF);
+ vacc3x01234567 = _mm256_fmadd_ps(va3, vb01234567, vacc3x01234567);
+ vacc3x89ABCDEF = _mm256_fmadd_ps(va3, vb89ABCDEF, vacc3x89ABCDEF);
+ vacc4x01234567 = _mm256_fmadd_ps(va4, vb01234567, vacc4x01234567);
+ vacc4x89ABCDEF = _mm256_fmadd_ps(va4, vb89ABCDEF, vacc4x89ABCDEF);
+ k -= sizeof(float);
+ } while (k != 0);
+ p -= 5 * sizeof(void*);
+ } while (p != 0);
+
+ const __m256 vmax = _mm256_broadcast_ps((const __m128*) params->sse.max);
+ vacc0x01234567 = _mm256_min_ps(vacc0x01234567, vmax);
+ vacc1x01234567 = _mm256_min_ps(vacc1x01234567, vmax);
+ vacc2x01234567 = _mm256_min_ps(vacc2x01234567, vmax);
+ vacc3x01234567 = _mm256_min_ps(vacc3x01234567, vmax);
+ vacc4x01234567 = _mm256_min_ps(vacc4x01234567, vmax);
+ vacc0x89ABCDEF = _mm256_min_ps(vacc0x89ABCDEF, vmax);
+ vacc1x89ABCDEF = _mm256_min_ps(vacc1x89ABCDEF, vmax);
+ vacc2x89ABCDEF = _mm256_min_ps(vacc2x89ABCDEF, vmax);
+ vacc3x89ABCDEF = _mm256_min_ps(vacc3x89ABCDEF, vmax);
+ vacc4x89ABCDEF = _mm256_min_ps(vacc4x89ABCDEF, vmax);
+
+ const __m256 vmin = _mm256_broadcast_ps((const __m128*) params->sse.min);
+ vacc0x01234567 = _mm256_max_ps(vacc0x01234567, vmin);
+ vacc1x01234567 = _mm256_max_ps(vacc1x01234567, vmin);
+ vacc2x01234567 = _mm256_max_ps(vacc2x01234567, vmin);
+ vacc3x01234567 = _mm256_max_ps(vacc3x01234567, vmin);
+ vacc4x01234567 = _mm256_max_ps(vacc4x01234567, vmin);
+ vacc0x89ABCDEF = _mm256_max_ps(vacc0x89ABCDEF, vmin);
+ vacc1x89ABCDEF = _mm256_max_ps(vacc1x89ABCDEF, vmin);
+ vacc2x89ABCDEF = _mm256_max_ps(vacc2x89ABCDEF, vmin);
+ vacc3x89ABCDEF = _mm256_max_ps(vacc3x89ABCDEF, vmin);
+ vacc4x89ABCDEF = _mm256_max_ps(vacc4x89ABCDEF, vmin);
+
+ if XNN_LIKELY(nc >= 16) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c4 + 8, vacc4x89ABCDEF);
+ c4 = (float*) ((uintptr_t) c4 + cn_stride);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c3 + 8, vacc3x89ABCDEF);
+ c3 = (float*) ((uintptr_t) c3 + cn_stride);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c2 + 8, vacc2x89ABCDEF);
+ c2 = (float*) ((uintptr_t) c2 + cn_stride);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c1 + 8, vacc1x89ABCDEF);
+ c1 = (float*) ((uintptr_t) c1 + cn_stride);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+ _mm256_storeu_ps(c0 + 8, vacc0x89ABCDEF);
+ c0 = (float*) ((uintptr_t) c0 + cn_stride);
+
+ a = (const float**restrict) ((uintptr_t) a - ks);
+ nc -= 16;
+ } else {
+ if (nc & 8) {
+ _mm256_storeu_ps(c4, vacc4x01234567);
+ _mm256_storeu_ps(c3, vacc3x01234567);
+ _mm256_storeu_ps(c2, vacc2x01234567);
+ _mm256_storeu_ps(c1, vacc1x01234567);
+ _mm256_storeu_ps(c0, vacc0x01234567);
+
+ vacc4x01234567 = vacc4x89ABCDEF;
+ vacc3x01234567 = vacc3x89ABCDEF;
+ vacc2x01234567 = vacc2x89ABCDEF;
+ vacc1x01234567 = vacc1x89ABCDEF;
+ vacc0x01234567 = vacc0x89ABCDEF;
+
+ c4 += 8;
+ c3 += 8;
+ c2 += 8;
+ c1 += 8;
+ c0 += 8;
+ }
+ __m128 vacc4x0123 = _mm256_castps256_ps128(vacc4x01234567);
+ __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);
+ __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567);
+ __m128 vacc1x0123 = _mm256_castps256_ps128(vacc1x01234567);
+ __m128 vacc0x0123 = _mm256_castps256_ps128(vacc0x01234567);
+ if (nc & 4) {
+ _mm_storeu_ps(c4, vacc4x0123);
+ _mm_storeu_ps(c3, vacc3x0123);
+ _mm_storeu_ps(c2, vacc2x0123);
+ _mm_storeu_ps(c1, vacc1x0123);
+ _mm_storeu_ps(c0, vacc0x0123);
+
+ vacc4x0123 = _mm256_extractf128_ps(vacc4x01234567, 1);
+ vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);
+ vacc2x0123 = _mm256_extractf128_ps(vacc2x01234567, 1);
+ vacc1x0123 = _mm256_extractf128_ps(vacc1x01234567, 1);
+ vacc0x0123 = _mm256_extractf128_ps(vacc0x01234567, 1);
+
+ c4 += 4;
+ c3 += 4;
+ c2 += 4;
+ c1 += 4;
+ c0 += 4;
+ }
+ if (nc & 2) {
+ _mm_storel_pi((__m64*) c4, vacc4x0123);
+ _mm_storel_pi((__m64*) c3, vacc3x0123);
+ _mm_storel_pi((__m64*) c2, vacc2x0123);
+ _mm_storel_pi((__m64*) c1, vacc1x0123);
+ _mm_storel_pi((__m64*) c0, vacc0x0123);
+
+ vacc4x0123 = _mm_movehl_ps(vacc4x0123, vacc4x0123);
+ vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);
+ vacc2x0123 = _mm_movehl_ps(vacc2x0123, vacc2x0123);
+ vacc1x0123 = _mm_movehl_ps(vacc1x0123, vacc1x0123);
+ vacc0x0123 = _mm_movehl_ps(vacc0x0123, vacc0x0123);
+
+ c4 += 2;
+ c3 += 2;
+ c2 += 2;
+ c1 += 2;
+ c0 += 2;
+ }
+ if (nc & 1) {
+ _mm_store_ss(c4, vacc4x0123);
+ _mm_store_ss(c3, vacc3x0123);
+ _mm_store_ss(c2, vacc2x0123);
+ _mm_store_ss(c1, vacc1x0123);
+ _mm_store_ss(c0, vacc0x0123);
+ }
+
+ nc = 0;
+ }
+ } while (nc != 0);
+}
diff --git a/src/init.c b/src/init.c
index c0f17bf..009749f 100644
--- a/src/init.c
+++ b/src/init.c
@@ -702,21 +702,21 @@
};
} else if (!XNN_PLATFORM_MOBILE && cpuinfo_has_x86_fma3()) {
xnn_params.f32.gemm = (struct gemm_parameters) {
- .gemm = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_7x8__fma3_broadcast,
- .igemm = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_7x8__fma3_broadcast,
- .gemm1 = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_1x8__fma3_broadcast,
- .igemm1 = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_1x8__fma3_broadcast,
- .mr = 7,
- .nr = 8,
+ .gemm = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_5x16__fma3_broadcast,
+ .igemm = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_5x16__fma3_broadcast,
+ .gemm1 = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_1x16__fma3_broadcast,
+ .igemm1 = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_1x16__fma3_broadcast,
+ .mr = 5,
+ .nr = 16,
};
} else if (!XNN_PLATFORM_MOBILE && cpuinfo_has_x86_avx()) {
xnn_params.f32.gemm = (struct gemm_parameters) {
- .gemm = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_7x8__avx_broadcast,
- .igemm = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_7x8__avx_broadcast,
- .gemm1 = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_1x8__avx_broadcast,
- .igemm1 = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_1x8__avx_broadcast,
- .mr = 7,
- .nr = 8,
+ .gemm = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_5x16__avx_broadcast,
+ .igemm = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_5x16__avx_broadcast,
+ .gemm1 = (xnn_gemm_ukernel_function) xnn_f32_gemm_ukernel_1x16__avx_broadcast,
+ .igemm1 = (xnn_igemm_ukernel_function) xnn_f32_igemm_ukernel_1x16__avx_broadcast,
+ .mr = 5,
+ .nr = 16,
};
} else {
xnn_params.f32.gemm = (struct gemm_parameters) {
diff --git a/src/xnnpack/gemm.h b/src/xnnpack/gemm.h
index 0a29018..bfda4b5 100644
--- a/src/xnnpack/gemm.h
+++ b/src/xnnpack/gemm.h
@@ -32,98 +32,123 @@
size_t cn_stride, \
const union xnn_f32_output_params* params);
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x12__aarch64_neonfma_cortex_a53)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x16__avx512f_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x2__neon_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__neon_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neon_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neon_lane_ld128)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__neon_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neon_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neon_lane_ld128)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x2__neonfma_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__neonfma_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neonfma_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neonfma_lane_ld128)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__neonfma_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_lane_ld128)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__neon_dup_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neon_dup_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neon_dup_ld128)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neon_dup_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neon_dup_ld128)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__neonfma_dup_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld128)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld128)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__neon)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__neon)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__neon)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x8s4__neon)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__neonfma)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__neonfma)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__neonfma)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x8s4__neonfma)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch32_neon_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch32_neon_cortex_a75)
+
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__aarch64_neonfma_cortex_a53)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__aarch64_neonfma_cortex_a57)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__avx_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__fma3_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__neon_dup_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__neon_lane_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__neonfma_dup_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__neonfma_lane_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__psimd_loadsplat)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__psimd_splat)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__sse_dup)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__sse_load1)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__neon)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__neonfma)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__psimd)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__sse)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x12__aarch64_neonfma_cortex_a53)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x16__avx512f_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x2__neon_lane_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x2__neonfma_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_ld128)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a57)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch32_neon_cortex_a75)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__aarch32_neon_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__avx_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__fma3_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neon_dup_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neon_dup_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neon_lane_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neon_lane_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neonfma_lane_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__neonfma_lane_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__psimd_loadsplat)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__psimd_splat)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__sse_dup)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x12__aarch64_neonfma_cortex_a53)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x12__aarch64_neonfma_cortex_a53)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__sse_load1)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__sse_load1)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__neon)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__neonfma)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__psimd)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__sse_dup)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__sse_dup)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__sse)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__sse)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x16__avx512f_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__aarch64_neonfma_cortex_a75)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__avx_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__avx_broadcast)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__avx_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__avx_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_7x8__avx_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x16__avx_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_3x16__avx_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x16__avx_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x16__avx_broadcast)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__fma3_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__fma3_broadcast)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__fma3_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__neon_lane_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__neonfma_lane_ld64)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__fma3_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_7x8__fma3_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x8__fma3_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x16__fma3_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_3x16__fma3_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x16__fma3_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x16__fma3_broadcast)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x16__avx512f_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x16__avx512f_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x16__avx512f_broadcast)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x16__avx512f_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_7x16__avx512f_broadcast)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x16__avx512f_broadcast)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__psimd_loadsplat)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__psimd_loadsplat)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__psimd_loadsplat)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8__psimd_splat)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8__psimd_splat)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__psimd_splat)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x8s4__psimd)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x8s4__psimd)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__psimd)
+
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_5x8__aarch64_neonfma_cortex_a75)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a57)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a73)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a75)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_ld128)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__avx_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__fma3_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neon_dup_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neon_lane_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_lane_ld64)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neon_lane_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_lane_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neon_dup_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld128)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__psimd_loadsplat)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8__psimd_splat)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__neon)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__neonfma)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_6x8s4__psimd)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_7x16__avx512f_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_7x8__avx_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_7x8__fma3_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x16__avx512f_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x8__fma3_broadcast)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x8s4__neon)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_8x8s4__neonfma)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x2__wasm)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x4__wasm)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_2x4__wasm)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x2__wasm)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x4__wasm)
+DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x2__scalar)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_1x4__scalar)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_2x4__scalar)
-DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x2__scalar)
DECLARE_F32_GEMM_UKERNEL_FUNCTION(xnn_f32_gemm_ukernel_4x4__scalar)
#define DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(fn_name) \
@@ -140,87 +165,108 @@
const float* acc, \
const union xnn_f32_output_params* params);
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x12__aarch64_neonfma_cortex_a53)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x16__avx512f_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__neon_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neon_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neon_lane_ld128)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__neon_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neon_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neon_lane_ld128)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__neonfma_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neonfma_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neonfma_lane_ld128)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__neonfma_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_lane_ld128)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__neon_dup_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neon_dup_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neon_dup_ld128)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neon_dup_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neon_dup_ld128)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__neonfma_dup_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neonfma_dup_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neonfma_dup_ld128)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_dup_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_dup_ld128)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__neon)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__neon)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__neon)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x8s4__neon)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__neonfma)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__neonfma)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__neonfma)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x8s4__neonfma)
+
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__aarch64_neonfma_cortex_a53)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__aarch64_neonfma_cortex_a57)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__avx_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__fma3_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__neon_dup_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__neon_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__neonfma_dup_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__neonfma_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__psimd_loadsplat)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__psimd_splat)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__sse_dup)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__sse_load1)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__neon)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__neonfma)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__psimd)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__sse)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x12__aarch64_neonfma_cortex_a53)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x16__avx512f_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x2__neon_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x2__neonfma_lane_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_ld128)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a57)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__avx_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__fma3_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neon_dup_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neon_dup_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neon_lane_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neon_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neonfma_dup_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neonfma_dup_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neonfma_lane_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__neonfma_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__psimd_loadsplat)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__psimd_splat)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__sse_dup)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__sse_load1)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__neon)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__neonfma)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__psimd)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__sse)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x16__avx512f_broadcast)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__avx_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__fma3_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__neon_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__neonfma_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x16__avx512f_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_ld128)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a57)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a73)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_ld64)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x12__aarch64_neonfma_cortex_a53)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x12__aarch64_neonfma_cortex_a53)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__sse_load1)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__sse_load1)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__sse_dup)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__sse_dup)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__sse)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__sse)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__avx_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__avx_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__avx_broadcast)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__avx_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__fma3_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neon_dup_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neon_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_dup_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_lane_ld64)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neon_lane_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_lane_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neon_dup_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__neonfma_dup_ld128)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__psimd_splat)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__neon)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__neonfma)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__psimd)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_7x16__avx512f_broadcast)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_7x8__avx_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x16__avx_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_3x16__avx_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x16__avx_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x16__avx_broadcast)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__fma3_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__fma3_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x8__fma3_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__fma3_broadcast)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_7x8__fma3_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x16__avx512f_broadcast)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x8__fma3_broadcast)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x8s4__neon)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x8s4__neonfma)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x16__fma3_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_3x16__fma3_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x16__fma3_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x16__fma3_broadcast)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x16__avx512f_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x16__avx512f_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_5x16__avx512f_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x16__avx512f_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_7x16__avx512f_broadcast)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_8x16__avx512f_broadcast)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__psimd_loadsplat)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__psimd_loadsplat)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__psimd_loadsplat)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8__psimd_splat)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8__psimd_splat)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8__psimd_splat)
+
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x8s4__psimd)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x8s4__psimd)
+DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_6x8s4__psimd)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x4__wasm)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_2x4__wasm)
@@ -229,7 +275,6 @@
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_1x4__scalar)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_2x4__scalar)
-DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x2__scalar)
DECLARE_F32_GEMMINC_UKERNEL_FUNCTION(xnn_f32_gemminc_ukernel_4x4__scalar)
#define DECLARE_F16_GEMM_UKERNEL_FUNCTION(fn_name) \
@@ -262,17 +307,19 @@
size_t cn_stride, \
const union xnn_q8_gemm_params* params);
-DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_2x2__scalar)
-DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_2x4c8__neon)
-DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_2x4c8__sse2)
-DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_3x3c8__neon)
-DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_4x4c2__sse2)
-DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_4x8__aarch32_neon)
+
DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_4x8__neon)
-DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_6x4__neon)
-DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_8x8__aarch64_neon)
DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_8x8__neon)
+DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_4x8__aarch32_neon)
+
+DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_8x8__aarch64_neon)
+
+DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_2x4c8__sse2)
+DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_4x4c2__sse2)
+
+DECLARE_Q8_GEMM_UKERNEL_FUNCTION(xnn_q8_gemm_ukernel_2x2__scalar)
+
#ifdef __cplusplus
} // extern "C"
#endif
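
For orientation amid the reshuffled declarations: the GEMMINC variants differ from plain GEMM only in the trailing const float* acc parameter visible at the top of the hunk, which supplies precomputed partial sums for the kernel to keep accumulating into instead of starting from the bias. A minimal sketch of what each DECLARE_F32_GEMMINC_UKERNEL_FUNCTION line expands to, with paraphrased parameter names (only acc and params are verbatim from this diff):

    XNN_INTERNAL void fn_name(
        size_t mr,                  /* rows of A/C handled, at most the kernel's MR */
        size_t nc,                  /* columns of C */
        size_t kc,                  /* reduction length */
        const float* a,             /* input matrix A */
        size_t a_stride,            /* distance between rows of A */
        const float* w,             /* packed weights */
        float* c,                   /* output matrix C */
        size_t cm_stride,           /* distance between rows of C */
        size_t cn_stride,           /* distance between NR-wide column tiles of C */
        const float* acc,           /* partial accumulators consumed by GEMMINC */
        const union xnn_f32_output_params* params);
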
diff --git a/src/xnnpack/igemm.h b/src/xnnpack/igemm.h
index 2c7b64d..744234e 100644
--- a/src/xnnpack/igemm.h
+++ b/src/xnnpack/igemm.h
@@ -34,64 +34,32 @@
const float* zero, \
const union xnn_f32_output_params* params);
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x12__aarch64_neonfma_cortex_a53)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x16__avx512f_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__aarch64_neonfma_cortex_a53)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__aarch64_neonfma_cortex_a57)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__avx_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__fma3_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__neon_dup_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__neon_lane_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__neonfma_dup_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__neonfma_lane_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__psimd_loadsplat)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__psimd_splat)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__sse_dup)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__sse_load1)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__neon)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__neonfma)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__psimd)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__sse)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x12__aarch64_neonfma_cortex_a53)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x16__avx512f_broadcast)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2__neon_lane_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2__neonfma_lane_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2c4__psimd)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2c4__sse)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x4__neon_lane_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x4__neonfma_lane_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__avx_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__fma3_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neon_dup_ld128)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neon_dup_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neon_lane_ld128)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__neon_lane_ld64)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neon_lane_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neonfma_dup_ld128)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neonfma_dup_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neonfma_lane_ld128)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neon_lane_ld128)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2__neonfma_lane_ld64)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x4__neonfma_lane_ld64)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__neonfma_lane_ld64)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neonfma_lane_ld64)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__psimd_loadsplat)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__psimd_splat)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__sse_dup)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__sse_load1)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neonfma_lane_ld128)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__neon_dup_ld64)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neon_dup_ld64)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neon_dup_ld128)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__neonfma_dup_ld64)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neonfma_dup_ld64)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__neonfma_dup_ld128)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__neon)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__neon)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__neonfma)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__neonfma)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__psimd)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__sse)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x16__avx512f_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x8__avx_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x8__fma3_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x16__avx512f_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a57)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a75)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__avx_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__fma3_broadcast)
+
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__neon_dup_ld64)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__neon_lane_ld64)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__neonfma_dup_ld64)
@@ -100,27 +68,85 @@
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__neonfma_lane_ld128)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__neon_dup_ld128)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__neonfma_dup_ld128)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__psimd_loadsplat)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__psimd_splat)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8s4__neon)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8s4__neonfma)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8s4__psimd)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_7x16__avx512f_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_7x8__avx_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_7x8__fma3_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_8x16__avx512f_broadcast)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_8x8__fma3_broadcast)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_8x8s4__neon)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_8x8s4__neonfma)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__aarch64_neonfma_cortex_a53)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__aarch64_neonfma_cortex_a57)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__aarch64_neonfma_cortex_a75)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a75)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x8__aarch64_neonfma_cortex_a75)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a57)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a75)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x12__aarch64_neonfma_cortex_a53)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x12__aarch64_neonfma_cortex_a53)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__sse_load1)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__sse_load1)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__sse_dup)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__sse_dup)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__sse)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__sse)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2c4__sse)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__avx_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__avx_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x8__avx_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__avx_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_7x8__avx_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x16__avx_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_3x16__avx_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x16__avx_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x16__avx_broadcast)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x8__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_7x8__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_8x8__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x16__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_3x16__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x16__fma3_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x16__fma3_broadcast)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x16__avx512f_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x16__avx512f_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_5x16__avx512f_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x16__avx512f_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_7x16__avx512f_broadcast)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_8x16__avx512f_broadcast)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__psimd_loadsplat)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__psimd_loadsplat)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__psimd_loadsplat)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8__psimd_splat)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8__psimd_splat)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8__psimd_splat)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x8s4__psimd)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x8s4__psimd)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_6x8s4__psimd)
+
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2c4__psimd)
+
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x4__wasm)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_2x4__wasm)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2__wasm)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x4__wasm)
+DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2__scalar)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_1x4__scalar)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_2x4__scalar)
-DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x2__scalar)
DECLARE_F32_IGEMM_UKERNEL_FUNCTION(xnn_f32_igemm_ukernel_4x4__scalar)
#define DECLARE_Q8_IGEMM_UKERNEL_FUNCTION(fn_name) \
@@ -138,11 +164,13 @@
const uint8_t* zero, \
const union xnn_q8_gemm_params* params);
-DECLARE_Q8_IGEMM_UKERNEL_FUNCTION(xnn_q8_igemm_ukernel_2x2__scalar)
-DECLARE_Q8_IGEMM_UKERNEL_FUNCTION(xnn_q8_igemm_ukernel_4x4c2__sse2)
DECLARE_Q8_IGEMM_UKERNEL_FUNCTION(xnn_q8_igemm_ukernel_4x8__neon)
DECLARE_Q8_IGEMM_UKERNEL_FUNCTION(xnn_q8_igemm_ukernel_8x8__neon)
+DECLARE_Q8_IGEMM_UKERNEL_FUNCTION(xnn_q8_igemm_ukernel_4x4c2__sse2)
+
+DECLARE_Q8_IGEMM_UKERNEL_FUNCTION(xnn_q8_igemm_ukernel_2x2__scalar)
+
#ifdef __cplusplus
} // extern "C"
#endif
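
The IGEMM ("indirect GEMM") declarations differ from their GEMM counterparts in how A is supplied: instead of a dense matrix, the kernel receives an indirection buffer of row pointers, and the zero pointer visible at the top of this hunk stands in for rows that fall into padding. A hedged sketch of the signature behind DECLARE_F32_IGEMM_UKERNEL_FUNCTION, with illustrative parameter names (only zero and params are verbatim from this diff):

    XNN_INTERNAL void fn_name(
        size_t mr,
        size_t nc,
        size_t kc,
        size_t ks,                  /* extent of the indirection walk: one group of mr pointers per kernel tap */
        const float** a,            /* indirection buffer of row pointers */
        const float* w,             /* packed weights */
        float* c,
        size_t cm_stride,
        size_t cn_stride,
        size_t a_offset,            /* offset applied to every row pointer that is not the zero row */
        const float* zero,          /* substitute row read wherever a tap lands in padding */
        const union xnn_f32_output_params* params);

This indirection is what lets a convolution reuse the GEMM inner loop without an explicit im2col copy of the input.
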
diff --git a/test/f32-gemm.cc b/test/f32-gemm.cc
index d479b2b..461ce91 100644
--- a/test/f32-gemm.cc
+++ b/test/f32-gemm.cc
@@ -27665,6 +27665,1422 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, k_eq_1_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .a_stride(3)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, k_gt_1_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, n_gt_16_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, n_div_16_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__AVX_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_gemm_ukernel_1x16__avx_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
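
The n_gt_16 and subtile cases above exist to walk the partial-tile path of the kernel: an NR=16 AVX kernel keeps each output row in two ymm registers and, when fewer than 16 columns remain, stores the residue with progressively narrower writes. A sketch of that store pattern, assuming the halving-store idiom of the existing 8-column AVX kernels carries over (variable names are illustrative, not taken from the generated sources; requires <immintrin.h> and -mavx):

    static void store_row_1x16(float* c, __m256 vacc_lo, __m256 vacc_hi, size_t nc) {
      if (nc >= 16) {
        _mm256_storeu_ps(c, vacc_lo);      /* full tile: two 8-lane stores */
        _mm256_storeu_ps(c + 8, vacc_hi);
        return;
      }
      if (nc & 8) {
        _mm256_storeu_ps(c, vacc_lo);
        vacc_lo = vacc_hi;                 /* remaining lanes now live in the other register */
        c += 8;
      }
      __m128 v = _mm256_castps256_ps128(vacc_lo);
      if (nc & 4) {
        _mm_storeu_ps(c, v);
        v = _mm256_extractf128_ps(vacc_lo, 1);  /* advance to the upper 128 bits */
        c += 4;
      }
      if (nc & 2) {
        _mm_storel_pi((__m64*) c, v);      /* low two lanes */
        v = _mm_movehl_ps(v, v);           /* shift the high pair down */
        c += 2;
      }
      if (nc & 1) {
        _mm_store_ss(c, v);
      }
    }
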
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, k_eq_1_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .a_stride(3)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, k_gt_1_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, n_gt_16_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, n_div_16_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__AVX_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_gemm_ukernel_3x16__avx_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
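
A note on the magic constants recurring in these cases: the strides are deliberately chosen so that a kernel silently assuming densely packed rows fails loudly. cn_stride(19) and cm_stride(19) sit just above NR=16 and share no factor with it, and the a_stride values always exceed the k under test (a_stride(3) with k=1, a_stride(11) with k up to 9, a_stride(7) with k up to 5). Schematically, under a hedged reading of the tester's conventions:

    /* row i of A starts at a + i * a_stride: 11 elements apart while only k <= 9 are read */
    /* row i of C starts at c + i * cm_stride: 19 elements apart while at most 16 are written */

so any off-by-one in the kernel's pointer bumping lands in a cell the tester verifies, instead of being absorbed by a dense layout.
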
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, k_eq_1_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .a_stride(3)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, k_gt_1_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, n_gt_16_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, n_div_16_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__AVX_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_gemm_ukernel_4x16__avx_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, k_eq_1_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .a_stride(3)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, k_gt_1_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, n_gt_16_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, n_div_16_strided_a) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__AVX_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_gemm_ukernel_5x16__avx_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
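
A plausible reason the new tile set stops at 5x16, where the 8-column family reaches 7x8 (AVX) and 8x8 (FMA3), is ymm register pressure on x86-64, which has 16 architectural ymm registers: with NR=16 each output row needs two 8-lane accumulators, plus two registers for the current weight tile and one for the broadcast A element, i.e.

    registers(MR) = 2*MR + 2 + 1

which gives 13 of 16 at MR=5, would leave no scratch headroom at MR=6 (15), and overflows outright at MR=7 (17). This accounting is a back-of-the-envelope estimate, not stated anywhere in the diff.
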
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_GEMM_1X8__FMA3_BROADCAST, k_eq_1) {
TEST_REQUIRES_X86_FMA3;
GemmMicrokernelTester()
@@ -29789,6 +31205,1422 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, k_eq_1_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .a_stride(3)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, k_gt_1_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, n_gt_16_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, n_div_16_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_1X16__FMA3_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_gemm_ukernel_1x16__fma3_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
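+
+// All of the sweeps above go through GemmMicrokernelTester's fluent
+// builder: mr/nr/kr/sr describe the tile the micro-kernel was generated
+// for, while m/n/k describe the problem actually run. As a rough sketch
+// of what one invocation reduces to under the XNNPACK GEMM micro-kernel
+// contract (kc and all strides are byte counts; with kr == 1 and
+// sr == 1 the packed weights hold 16 biases, then 16 weights per k, for
+// each 16-wide column block; a/packed_w/c/output_params are placeholder
+// names here, not tester internals):
+//
+//   xnn_f32_gemm_ukernel_1x16__fma3_broadcast(
+//       /*mr=*/1, /*nc=*/16, /*kc=*/k * sizeof(float),
+//       a, /*a_stride=*/k * sizeof(float),
+//       packed_w,
+//       c, /*cm_stride=*/16 * sizeof(float), /*cn_stride=*/16 * sizeof(float),
+//       &output_params);
+//
+// qmin()/qmax() only populate the min/max clamping fields of the output
+// params, so those two tests cover the clamping path rather than new math.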
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, k_eq_1_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .a_stride(3)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, k_gt_1_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, n_gt_16_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, n_div_16_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_3X16__FMA3_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_gemm_ukernel_3x16__fma3_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
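+
+// The strided-A variants pin down the load pattern rather than the
+// arithmetic: element (i, j) of A is read from a[i * a_stride + j], so
+// the only hard requirement is a_stride >= k. The strides are picked
+// just above the largest k in each loop (a_stride(11) against k < 10,
+// a_stride(7) against k <= 5, a_stride(3) against k == 1), leaving a
+// small guard gap per row. Conceptually, inside the tester:
+//
+//   // row i of A starts k elements plus a gap past row i-1
+//   const float* a_row = a.data() + i * a_stride();
+//
+// so a kernel that reads past kc picks up gap values and, if it uses
+// them, fails the reference comparison instead of passing silently.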
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, k_eq_1_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .a_stride(3)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, k_gt_1_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, n_gt_16_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, n_div_16_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_4X16__FMA3_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_gemm_ukernel_4x16__fma3_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, k_eq_1_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .a_stride(3)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, k_gt_1_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, n_gt_16_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, n_div_16_strided_a) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_GEMM_5X16__FMA3_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_gemm_ukernel_5x16__fma3_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
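+
+// The *_subtile sweeps (iterations(1), so each m x n shape runs once)
+// exist to drive the kernels' edge paths: calling an MRx16 kernel with
+// mr < MR makes the generated code alias the unused row pointers to the
+// last valid row, and n < 16 takes the partial-store branch. A sketch of
+// the row clamping these generated kernels typically emit:
+//
+//   const float* a1 = (const float*) ((uintptr_t) a0 + a_stride);
+//   float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
+//   if XNN_UNPREDICTABLE(mr < 2) {
+//     a1 = a0;  // duplicate loads stay in bounds
+//     c1 = c0;  // duplicate stores land on a row that is written anyway
+//   }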
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_GEMM_1X16__AVX512F_BROADCAST, k_eq_1) {
TEST_REQUIRES_X86_AVX512F;
GemmMicrokernelTester()
diff --git a/test/f32-gemm.yaml b/test/f32-gemm.yaml
index 1117a8f..b4be22d 100644
--- a/test/f32-gemm.yaml
+++ b/test/f32-gemm.yaml
@@ -165,6 +165,14 @@
k-block: 1
- name: xnn_f32_gemm_ukernel_7x8__avx_broadcast
k-block: 1
+- name: xnn_f32_gemm_ukernel_1x16__avx_broadcast
+ k-block: 1
+- name: xnn_f32_gemm_ukernel_3x16__avx_broadcast
+ k-block: 1
+- name: xnn_f32_gemm_ukernel_4x16__avx_broadcast
+ k-block: 1
+- name: xnn_f32_gemm_ukernel_5x16__avx_broadcast
+ k-block: 1
- name: xnn_f32_gemm_ukernel_1x8__fma3_broadcast
k-block: 1
- name: xnn_f32_gemm_ukernel_4x8__fma3_broadcast
@@ -177,6 +185,14 @@
k-block: 1
- name: xnn_f32_gemm_ukernel_8x8__fma3_broadcast
k-block: 1
+- name: xnn_f32_gemm_ukernel_1x16__fma3_broadcast
+ k-block: 1
+- name: xnn_f32_gemm_ukernel_3x16__fma3_broadcast
+ k-block: 1
+- name: xnn_f32_gemm_ukernel_4x16__fma3_broadcast
+ k-block: 1
+- name: xnn_f32_gemm_ukernel_5x16__fma3_broadcast
+ k-block: 1
- name: xnn_f32_gemm_ukernel_1x16__avx512f_broadcast
k-block: 1
- name: xnn_f32_gemm_ukernel_4x16__avx512f_broadcast
diff --git a/test/f32-igemm.cc b/test/f32-igemm.cc
index e23a7c4..51a2378 100644
--- a/test/f32-igemm.cc
+++ b/test/f32-igemm.cc
@@ -26550,6 +26550,1598 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, small_kernel_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, n_gt_16_small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, n_div_16_small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, a_offset) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(7)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, zero) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t mz = 0; mz < 1; mz++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(7)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_1X16__AVX_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_igemm_ukernel_1x16__avx_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
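+
+// The IGEMM tests add three knobs on top of the GEMM ones. ks() is the
+// number of indirection pointers per output row (the convolution kernel
+// size), so the kernel accumulates k elements from each of ks pointers.
+// a_offset() rebases every non-padding pointer, and zero_index() makes
+// one indirection entry point at the shared zero buffer. A sketch of
+// the per-entry fix-up these kernels apply (the usual XNNPACK IGEMM
+// pattern, not code from this diff):
+//
+//   const float* restrict a0 = a[0];
+//   if XNN_UNPREDICTABLE(a0 != zero) {
+//     a0 = (const float*) ((uintptr_t) a0 + a_offset);  // padding rows skip the rebase
+//   }
+//
+// which is why the zero tests rotate zero_index across every row: each
+// entry has to survive being the zero pointer.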
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, small_kernel_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, n_gt_16_small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, n_div_16_small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, a_offset) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(17)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, zero) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t mz = 0; mz < 3; mz++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(17)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_3X16__AVX_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_igemm_ukernel_3x16__avx_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, small_kernel_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, n_gt_16_small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, n_div_16_small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, a_offset) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, zero) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t mz = 0; mz < 4; mz++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_4X16__AVX_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_igemm_ukernel_4x16__avx_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, small_kernel_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, n_gt_16_small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, n_div_16_small_kernel) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, a_offset) {
+ TEST_REQUIRES_X86_AVX;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(29)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, zero) {
+ TEST_REQUIRES_X86_AVX;
+ for (uint32_t mz = 0; mz < 5; mz++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(29)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+
+ TEST(F32_IGEMM_5X16__AVX_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_AVX;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_igemm_ukernel_5x16__avx_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
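+
+// strided_cm and strided_cn pin down the output addressing: rows of C
+// are cm_stride apart and each 16-column block within a row is
+// cn_stride apart, so with nr == 16 a stride of 19 leaves three guard
+// columns the kernel must not touch. In element terms (converted to
+// bytes at the kernel boundary):
+//
+//   // element (i, block, j) of C, 0 <= j < 16:
+//   c[i * cm_stride + block * cn_stride + j]
+//
+// The single-block cases (n <= 16) only exercise cm_stride; the
+// n_gt_16 and n_div_16 strided_cn sweeps are what reach a second block.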
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_IGEMM_1X8__FMA3_BROADCAST, k_eq_1) {
TEST_REQUIRES_X86_FMA3;
GemmMicrokernelTester()
@@ -28938,6 +30530,1598 @@
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, small_kernel_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, n_gt_16_small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, n_div_16_small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, a_offset) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(7)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, zero) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t mz = 0; mz < 1; mz++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(7)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_1X16__FMA3_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_igemm_ukernel_1x16__fma3_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
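+
+// Editor's note (illustrative sketch, not part of the generated tests): the
+// fluent GemmMicrokernelTester calls above are presumed to validate each
+// IGEMM micro-kernel against a scalar reference like the one below. The
+// helper name and the packed-weight layout (nr bias values followed by nr
+// weights per input channel, repeated ks times) are assumptions, not the
+// tester's actual API; <algorithm> and <cstddef> are assumed included.
+static void reference_igemm_sketch(
+    size_t mr, size_t nr, size_t kc, size_t ks,
+    const float** a, const float* w, float* c, size_t cm_stride,
+    float cmin, float cmax)
+{
+  for (size_t i = 0; i < mr; i++) {
+    for (size_t j = 0; j < nr; j++) {
+      float acc = w[j];  // bias precedes the packed weights
+      for (size_t p = 0; p < ks; p++) {
+        const float* row = a[p * mr + i];  // IGEMM indirection pointer
+        for (size_t k = 0; k < kc; k++) {
+          acc += row[k] * w[nr + (p * kc + k) * nr + j];
+        }
+      }
+      // The qmin/qmax cases exercise exactly this output clamp.
+      c[i * cm_stride + j] = std::min(std::max(acc, cmin), cmax);
+    }
+  }
+}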
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, small_kernel_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, n_gt_16_small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, n_div_16_small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 3; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, a_offset) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(17)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, zero) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t mz = 0; mz < 3; mz++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(17)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_3X16__FMA3_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(3)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(3)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_igemm_ukernel_3x16__fma3_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
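+
+// Editor's note (assumed semantics behind the a_offset/zero cases above):
+// each indirection pointer is advanced by a_offset unless it equals the
+// shared zero buffer, which stands in for out-of-bounds taps and must be
+// read as-is; zero_index rotates which of the mr rows points at that
+// buffer. Sketch only -- the helper name is hypothetical and <cstdint> is
+// assumed included for uintptr_t.
+static const float* resolve_indirection_sketch(
+    const float* row, const float* zero, size_t a_offset)
+{
+  return row == zero ? zero
+                     : (const float*) ((uintptr_t) row + a_offset);
+}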
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, small_kernel_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, n_gt_16_small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, n_div_16_small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, a_offset) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, zero) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t mz = 0; mz < 4; mz++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_4X16__FMA3_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_igemm_ukernel_4x16__fma3_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
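+
+// Editor's note (assumed output layout behind the strided_cm/strided_cn
+// cases above): cm_stride is the element pitch between output rows, and
+// cn_stride the pitch between successive nr-wide column tiles. A pitch of
+// 19 (greater than nr = 16 and coprime with it) would catch kernels that
+// silently assume a dense layout. Illustrative helper only.
+static float* c_tile_sketch(
+    float* c, size_t row, size_t tile, size_t cm_stride, size_t cn_stride)
+{
+  return c + row * cm_stride + tile * cn_stride;
+}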
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, k_eq_1) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, k_eq_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, k_eq_1_subtile_m) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(16)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, k_eq_1_subtile_n) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(1)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, k_gt_1) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, k_gt_1_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 2; k < 10; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, n_gt_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, n_gt_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, n_gt_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, n_div_16) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, n_div_16_strided_cn) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(19)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, n_div_16_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, small_kernel_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, n_gt_16_small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 17; n < 32; n++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, n_div_16_small_kernel) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t n = 32; n <= 48; n += 16) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, strided_cm_subtile) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 16; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(19)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, a_offset) {
+ TEST_REQUIRES_X86_FMA3;
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(29)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, zero) {
+ TEST_REQUIRES_X86_FMA3;
+ for (uint32_t mz = 0; mz < 5; mz++) {
+ for (size_t k = 1; k <= 5; k += 2) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(k)
+ .ks(3)
+ .a_offset(29)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, qmin) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, qmax) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+
+ TEST(F32_IGEMM_5X16__FMA3_BROADCAST, strided_cm) {
+ TEST_REQUIRES_X86_FMA3;
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(16)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(16)
+ .k(1)
+ .cm_stride(19)
+ .Test(xnn_f32_igemm_ukernel_5x16__fma3_broadcast);
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
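+
+// Editor's note (assumed meaning of the qmin/qmax knobs used above): the
+// byte value is presumed to pick a point inside the reference output range,
+// so qmin(128) / qmax(128) derive a clamp that bites on roughly half of the
+// outputs. Sketch under that assumption; the real mapping lives in
+// GemmMicrokernelTester, and <cmath>/<cstdint> are assumed included.
+static void derive_clamp_sketch(
+    float out_min, float out_max, uint8_t qmin, uint8_t qmax,
+    float* cmin, float* cmax)
+{
+  const float span = out_max - out_min;
+  *cmin = qmin == 0   ? -INFINITY : out_min + span * (float) qmin / 255.0f;
+  *cmax = qmax == 255 ? +INFINITY : out_min + span * (float) qmax / 255.0f;
+}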
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
TEST(F32_IGEMM_1X16__AVX512F_BROADCAST, k_eq_1) {
TEST_REQUIRES_X86_AVX512F;
GemmMicrokernelTester()
diff --git a/test/f32-igemm.yaml b/test/f32-igemm.yaml
index 7db694b..29d0828 100644
--- a/test/f32-igemm.yaml
+++ b/test/f32-igemm.yaml
@@ -153,6 +153,14 @@
k-block: 1
- name: xnn_f32_igemm_ukernel_7x8__avx_broadcast
k-block: 1
+- name: xnn_f32_igemm_ukernel_1x16__avx_broadcast
+ k-block: 1
+- name: xnn_f32_igemm_ukernel_3x16__avx_broadcast
+ k-block: 1
+- name: xnn_f32_igemm_ukernel_4x16__avx_broadcast
+ k-block: 1
+- name: xnn_f32_igemm_ukernel_5x16__avx_broadcast
+ k-block: 1
- name: xnn_f32_igemm_ukernel_1x8__fma3_broadcast
k-block: 1
- name: xnn_f32_igemm_ukernel_4x8__fma3_broadcast
@@ -165,6 +173,14 @@
k-block: 1
- name: xnn_f32_igemm_ukernel_8x8__fma3_broadcast
k-block: 1
+- name: xnn_f32_igemm_ukernel_1x16__fma3_broadcast
+ k-block: 1
+- name: xnn_f32_igemm_ukernel_3x16__fma3_broadcast
+ k-block: 1
+- name: xnn_f32_igemm_ukernel_4x16__fma3_broadcast
+ k-block: 1
+- name: xnn_f32_igemm_ukernel_5x16__fma3_broadcast
+ k-block: 1
- name: xnn_f32_igemm_ukernel_1x16__avx512f_broadcast
k-block: 1
- name: xnn_f32_igemm_ukernel_4x16__avx512f_broadcast