Add 4x8 Cortex-A53 GEMM and GEMMINC unpipelined microkernels.
PiperOrigin-RevId: 276743130
diff --git a/test/f32-gemm.cc b/test/f32-gemm.cc
index ee7fea1..bac8575 100644
--- a/test/f32-gemm.cc
+++ b/test/f32-gemm.cc
@@ -1526,6 +1526,462 @@
#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .a_stride(5)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_m) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_n) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 2; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 2; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(5)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 2; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 3; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 3; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 3; k < 4; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 4; k <= 20; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 4; k <= 20; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 4; k <= 20; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+        .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+          .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(13)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+        .n(n)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(13)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, qmin) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .qmin(128)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, qmax) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .qmax(128)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .cm_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+#endif // XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
+
+
+#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
TEST(F32_GEMM_4X8__AARCH64_NEONFMA_CORTEX_A57, k_eq_8) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
@@ -3029,7 +3485,7 @@
#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -3038,7 +3494,7 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
@@ -3051,12 +3507,12 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.cn_stride(11)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8_strided_a) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -3065,12 +3521,12 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
- .a_stride(11)
+ .k(2)
+ .a_stride(5)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8_subtile) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
@@ -3081,14 +3537,14 @@
.sr(1)
.m(m)
.n(n)
- .k(8)
+ .k(2)
.iterations(1)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8_subtile_m) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_m) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
@@ -3098,13 +3554,13 @@
.sr(1)
.m(m)
.n(8)
- .k(8)
+ .k(2)
.iterations(1)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8_subtile_n) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_n) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3114,15 +3570,15 @@
.sr(1)
.m(6)
.n(n)
- .k(8)
+ .k(2)
.iterations(1)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_8) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k < 8; k++) {
+ for (size_t k = 1; k < 2; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3135,9 +3591,9 @@
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_8_strided_a) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k < 8; k++) {
+ for (size_t k = 1; k < 2; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3146,14 +3602,14 @@
.m(6)
.n(8)
.k(k)
- .a_stride(11)
+ .a_stride(5)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_8_subtile) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k < 8; k++) {
+ for (size_t k = 1; k < 2; k++) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3171,9 +3627,9 @@
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_8) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 9; k < 16; k++) {
+ for (size_t k = 3; k < 4; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3186,9 +3642,9 @@
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_8_strided_a) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 9; k < 16; k++) {
+ for (size_t k = 3; k < 4; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3197,14 +3653,14 @@
.m(6)
.n(8)
.k(k)
- .a_stride(19)
+ .a_stride(7)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_8_subtile) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 9; k < 16; k++) {
+ for (size_t k = 3; k < 4; k++) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3222,9 +3678,9 @@
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_8) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 16; k <= 80; k += 8) {
+ for (size_t k = 4; k <= 20; k += 2) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3237,9 +3693,9 @@
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_8_strided_a) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 16; k <= 80; k += 8) {
+ for (size_t k = 4; k <= 20; k += 2) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3248,14 +3704,14 @@
.m(6)
.n(8)
.k(k)
- .a_stride(83)
+ .a_stride(23)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_8_subtile) {
+ TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 16; k <= 80; k += 8) {
+ for (size_t k = 4; k <= 20; k += 2) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3276,7 +3732,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3293,7 +3749,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_cn) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3311,7 +3767,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3320,7 +3776,7 @@
.m(6)
.n(n)
.k(k)
- .a_stride(43)
+ .a_stride(13)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
@@ -3329,7 +3785,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
.mr(6)
@@ -3349,7 +3805,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3366,7 +3822,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_cn) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3384,7 +3840,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3393,7 +3849,7 @@
.m(6)
.n(n)
.k(k)
- .a_stride(43)
+ .a_stride(13)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
@@ -3402,7 +3858,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
.mr(6)
@@ -3421,7 +3877,7 @@
TEST(F32_GEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3449,7 +3905,7 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.qmin(128)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
@@ -3463,7 +3919,7 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.qmax(128)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
@@ -3477,7 +3933,7 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.cm_stride(11)
.Test(xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
diff --git a/test/f32-gemm.yaml b/test/f32-gemm.yaml
index b48ebd8..015fb24 100644
--- a/test/f32-gemm.yaml
+++ b/test/f32-gemm.yaml
@@ -14,6 +14,10 @@
k-block: 8
pipelined: true
assembly: true
+- name: xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a53
+ k-block: 2
+ pipelined: false
+ assembly: true
- name: xnn_f32_gemm_ukernel_4x8__aarch64_neonfma_cortex_a57
k-block: 8
pipelined: true
@@ -27,7 +31,7 @@
pipelined: true
assembly: true
- name: xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a53
- k-block: 8
+ k-block: 2
pipelined: false
assembly: true
- name: xnn_f32_gemm_ukernel_6x8__aarch64_neonfma_cortex_a57
diff --git a/test/f32-gemminc.cc b/test/f32-gemminc.cc
index e3d896a..d30533f 100644
--- a/test/f32-gemminc.cc
+++ b/test/f32-gemminc.cc
@@ -1526,6 +1526,462 @@
#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .cn_stride(11)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .a_stride(5)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_m) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_n) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 2; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 2; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(5)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 2; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 3; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 3; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 3; k < 4; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 4; k <= 20; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 4; k <= 20; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 4; k <= 20; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+        .n(n)
+ .k(k)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+          .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(13)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+        .n(n)
+ .k(k)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_a) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(13)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, qmin) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .qmin(128)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, qmax) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .qmax(128)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .cm_stride(11)
+ .Test(xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+#endif // XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
+
+
+#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
TEST(F32_GEMMINC_4X8__AARCH64_NEONFMA_CORTEX_A57, k_eq_8) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
@@ -3029,7 +3485,7 @@
#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -3038,7 +3494,7 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
@@ -3051,12 +3507,12 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.cn_stride(11)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8_strided_a) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -3065,12 +3521,12 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
- .a_stride(11)
+ .k(2)
+ .a_stride(5)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8_subtile) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
@@ -3081,14 +3537,14 @@
.sr(1)
.m(m)
.n(n)
- .k(8)
+ .k(2)
.iterations(1)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8_subtile_m) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_m) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
@@ -3098,13 +3554,13 @@
.sr(1)
.m(m)
.n(8)
- .k(8)
+ .k(2)
.iterations(1)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_8_subtile_n) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_n) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3114,15 +3570,15 @@
.sr(1)
.m(6)
.n(n)
- .k(8)
+ .k(2)
.iterations(1)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_8) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k < 8; k++) {
+ for (size_t k = 1; k < 2; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3135,9 +3591,9 @@
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_8_strided_a) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k < 8; k++) {
+ for (size_t k = 1; k < 2; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3146,14 +3602,14 @@
.m(6)
.n(8)
.k(k)
- .a_stride(11)
+ .a_stride(5)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_8_subtile) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k < 8; k++) {
+ for (size_t k = 1; k < 2; k++) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3171,9 +3627,9 @@
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_8) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 9; k < 16; k++) {
+ for (size_t k = 3; k < 4; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3186,9 +3642,9 @@
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_8_strided_a) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 9; k < 16; k++) {
+ for (size_t k = 3; k < 4; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3197,14 +3653,14 @@
.m(6)
.n(8)
.k(k)
- .a_stride(19)
+ .a_stride(7)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_8_subtile) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 9; k < 16; k++) {
+ for (size_t k = 3; k < 4; k++) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3222,9 +3678,9 @@
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_8) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 16; k <= 80; k += 8) {
+ for (size_t k = 4; k <= 20; k += 2) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3237,9 +3693,9 @@
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_8_strided_a) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 16; k <= 80; k += 8) {
+ for (size_t k = 4; k <= 20; k += 2) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3248,14 +3704,14 @@
.m(6)
.n(8)
.k(k)
- .a_stride(83)
+ .a_stride(23)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_8_subtile) {
+ TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 16; k <= 80; k += 8) {
+ for (size_t k = 4; k <= 20; k += 2) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3276,7 +3732,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3293,7 +3749,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_cn) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3311,7 +3767,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3320,7 +3776,7 @@
.m(6)
.n(n)
.k(k)
- .a_stride(43)
+ .a_stride(13)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
@@ -3329,7 +3785,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
.mr(6)
@@ -3349,7 +3805,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3366,7 +3822,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_cn) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3384,7 +3840,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_a) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -3393,7 +3849,7 @@
.m(6)
.n(n)
.k(k)
- .a_stride(43)
+ .a_stride(13)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
@@ -3402,7 +3858,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
.mr(6)
@@ -3421,7 +3877,7 @@
TEST(F32_GEMMINC_6X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -3449,7 +3905,7 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.qmin(128)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
@@ -3463,7 +3919,7 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.qmax(128)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
@@ -3477,7 +3933,7 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.cm_stride(11)
.Test(xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
diff --git a/test/f32-gemminc.yaml b/test/f32-gemminc.yaml
index 6586b7e..7d981d0 100644
--- a/test/f32-gemminc.yaml
+++ b/test/f32-gemminc.yaml
@@ -14,6 +14,10 @@
k-block: 8
pipelined: true
assembly: true
+- name: xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a53
+ k-block: 2
+ pipelined: false
+ assembly: true
- name: xnn_f32_gemminc_ukernel_4x8__aarch64_neonfma_cortex_a57
k-block: 8
pipelined: true
@@ -27,7 +31,7 @@
pipelined: true
assembly: true
- name: xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a53
- k-block: 8
+ k-block: 2
pipelined: false
assembly: true
- name: xnn_f32_gemminc_ukernel_6x8__aarch64_neonfma_cortex_a57
diff --git a/test/f32-igemm.cc b/test/f32-igemm.cc
index 07fd4f4..eae7328 100644
--- a/test/f32-igemm.cc
+++ b/test/f32-igemm.cc
@@ -1520,6 +1520,474 @@
#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_m) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_n) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(2)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 2; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 2; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 3; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 3; k < 4; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 4; k <= 20; k += 2) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 4; k <= 20; k += 2) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, small_kernel) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, small_kernel_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_small_kernel) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_small_kernel) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 10; k += 3) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, a_offset) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(43)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, zero) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t mz = 0; mz < 4; mz++) {
+ for (size_t k = 1; k <= 10; k += 3) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(43)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, qmin) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, qmax) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+
+ TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(2)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53);
+ }
+#endif // XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
+
+
+#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
TEST(F32_IGEMM_4X8__AARCH64_NEONFMA_CORTEX_A75, k_eq_8) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
@@ -2518,7 +2986,7 @@
#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_8) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -2527,11 +2995,11 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .k(2)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, strided_cn) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, strided_cn) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -2540,12 +3008,12 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.cn_stride(11)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_8_subtile) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
@@ -2556,14 +3024,14 @@
.sr(1)
.m(m)
.n(n)
- .k(8)
+ .k(2)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_8_subtile_m) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_m) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
@@ -2573,13 +3041,13 @@
.sr(1)
.m(m)
.n(8)
- .k(8)
+ .k(2)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_8_subtile_n) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_eq_2_subtile_n) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -2589,46 +3057,15 @@
.sr(1)
.m(6)
.n(n)
- .k(8)
+ .k(2)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_16) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- GemmMicrokernelTester()
- .mr(6)
- .nr(8)
- .kr(1)
- .sr(1)
- .m(6)
- .n(8)
- .k(16)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
- }
-
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_16_subtile) {
- TEST_REQUIRES_ARM_NEON_FMA;
- for (uint32_t m = 1; m <= 6; m++) {
- for (uint32_t n = 1; n <= 8; n++) {
- GemmMicrokernelTester()
- .mr(6)
- .nr(8)
- .kr(1)
- .sr(1)
- .m(m)
- .n(n)
- .k(16)
- .iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
- }
- }
- }
-
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_lt_16) {
- TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k < 16; k++) {
+ for (size_t k = 1; k < 2; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2637,13 +3074,13 @@
.m(6)
.n(8)
.k(k)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_lt_16_subtile) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_lt_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k < 16; k++) {
+ for (size_t k = 1; k < 2; k++) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -2655,15 +3092,15 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_gt_16) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 17; k < 16; k++) {
+ for (size_t k = 3; k < 4; k++) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2672,13 +3109,13 @@
.m(6)
.n(8)
.k(k)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_gt_8_subtile) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_gt_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 17; k < 16; k++) {
+ for (size_t k = 3; k < 4; k++) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -2690,15 +3127,15 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_div_8) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 24; k <= 80; k += 8) {
+ for (size_t k = 4; k <= 20; k += 2) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2707,13 +3144,13 @@
.m(6)
.n(8)
.k(k)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_div_8_subtile) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, k_div_2_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 24; k <= 80; k += 8) {
+ for (size_t k = 4; k <= 20; k += 2) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -2725,16 +3162,16 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_gt_8) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2743,15 +3180,15 @@
.m(6)
.n(8)
.k(k)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_gt_8_strided_cn) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_strided_cn) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2761,15 +3198,15 @@
.n(8)
.k(k)
.cn_stride(11)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_gt_8_subtile) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
.mr(6)
@@ -2780,16 +3217,16 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_div_8) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2798,15 +3235,15 @@
.m(6)
.n(8)
.k(k)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_div_8_strided_cn) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_strided_cn) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2816,15 +3253,15 @@
.n(n)
.k(k)
.cn_stride(11)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_div_8_subtile) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
GemmMicrokernelTester()
.mr(6)
@@ -2835,15 +3272,15 @@
.n(n)
.k(k)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, small_kernel) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, small_kernel) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2853,13 +3290,13 @@
.n(8)
.k(k)
.ks(3)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, small_kernel_subtile) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, small_kernel_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -2872,16 +3309,16 @@
.k(k)
.ks(3)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_gt_8_small_kernel) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_gt_8_small_kernel) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 9; n < 16; n++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2891,15 +3328,15 @@
.n(8)
.k(k)
.ks(3)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_div_8_small_kernel) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, n_div_8_small_kernel) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t n = 16; n <= 24; n += 8) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2909,14 +3346,14 @@
.n(8)
.k(k)
.ks(3)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, strided_cm_subtile) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm_subtile) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
for (uint32_t m = 1; m <= 6; m++) {
for (uint32_t n = 1; n <= 8; n++) {
GemmMicrokernelTester()
@@ -2929,15 +3366,15 @@
.k(k)
.cm_stride(11)
.iterations(1)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, a_offset) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, a_offset) {
TEST_REQUIRES_ARM_NEON_FMA;
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2947,15 +3384,15 @@
.n(8)
.k(k)
.ks(3)
- .a_offset(251)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .a_offset(67)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, zero) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, zero) {
TEST_REQUIRES_ARM_NEON_FMA;
for (uint32_t mz = 0; mz < 6; mz++) {
- for (size_t k = 1; k <= 40; k += 9) {
+ for (size_t k = 1; k <= 10; k += 3) {
GemmMicrokernelTester()
.mr(6)
.nr(8)
@@ -2965,14 +3402,14 @@
.n(8)
.k(k)
.ks(3)
- .a_offset(251)
+ .a_offset(67)
.zero_index(mz)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
}
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, qmin) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, qmin) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -2981,12 +3418,12 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.qmin(128)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, qmax) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, qmax) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -2995,12 +3432,12 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.qmax(128)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
- TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, strided_cm) {
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A53, strided_cm) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
.mr(6)
@@ -3009,9 +3446,9 @@
.sr(1)
.m(6)
.n(8)
- .k(8)
+ .k(2)
.cm_stride(11)
- .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53);
}
#endif // XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
@@ -3516,6 +3953,505 @@
#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(8)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(8)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 6; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(8)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_8_subtile_m) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 6; m++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(8)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_8_subtile_n) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(n)
+ .k(8)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_16) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(16)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_eq_16_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t m = 1; m <= 6; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(16)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_lt_16) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 16; k++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_lt_16_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k < 16; k++) {
+ for (uint32_t m = 1; m <= 6; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_gt_16) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t k = 17; k < 32; k++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+
+  TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_gt_16_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+    for (size_t k = 17; k < 32; k++) {
+ for (uint32_t m = 1; m <= 6; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_div_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 24; k <= 80; k += 8) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, k_div_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 24; k <= 80; k += 8) {
+ for (uint32_t m = 1; m <= 6; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_gt_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+          .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_gt_8_strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+          .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_gt_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ for (uint32_t m = 1; m <= 6; m++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_div_8) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+          .n(n)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_div_8_strided_cn) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_div_8_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ for (uint32_t m = 1; m <= 6; m++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, small_kernel) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, small_kernel_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 40; k += 9) {
+ for (uint32_t m = 1; m <= 6; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_gt_8_small_kernel) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+          .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, n_div_8_small_kernel) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+          .n(n)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, strided_cm_subtile) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 40; k += 9) {
+ for (uint32_t m = 1; m <= 6; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, a_offset) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(251)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, zero) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ for (uint32_t mz = 0; mz < 6; mz++) {
+ for (size_t k = 1; k <= 40; k += 9) {
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(251)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, qmin) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(8)
+ .qmin(128)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, qmax) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(8)
+ .qmax(128)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+
+ TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A73, strided_cm) {
+ TEST_REQUIRES_ARM_NEON_FMA;
+ GemmMicrokernelTester()
+ .mr(6)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(6)
+ .n(8)
+ .k(8)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a73);
+ }
+#endif // XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
+
+
+#if XNN_ARCH_ARM64 && XNN_ENABLE_ASSEMBLY
TEST(F32_IGEMM_6X8__AARCH64_NEONFMA_CORTEX_A75, k_eq_8) {
TEST_REQUIRES_ARM_NEON_FMA;
GemmMicrokernelTester()
diff --git a/test/f32-igemm.yaml b/test/f32-igemm.yaml
index 0e7977c..311f9fa 100644
--- a/test/f32-igemm.yaml
+++ b/test/f32-igemm.yaml
@@ -14,6 +14,10 @@
k-block: 8
pipelined: true
assembly: true
+- name: xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a53
+ k-block: 2
+ pipelined: false
+ assembly: true
- name: xnn_f32_igemm_ukernel_4x8__aarch64_neonfma_cortex_a75
k-block: 8
pipelined: true
@@ -23,7 +27,7 @@
pipelined: true
assembly: true
- name: xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a53
- k-block: 8
+ k-block: 2
pipelined: false
assembly: true
- name: xnn_f32_igemm_ukernel_6x8__aarch64_neonfma_cortex_a57