GEMM/IGEMM microkernels with alternative activations in WAsm SIMD
PiperOrigin-RevId: 316715937
diff --git a/test/f32-gemm-relu.cc b/test/f32-gemm-relu.cc
index 35080ad..a60fcef 100644
--- a/test/f32-gemm-relu.cc
+++ b/test/f32-gemm-relu.cc
@@ -22,6 +22,1618 @@
#include "gemm-microkernel-tester.h"
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(43)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_strided_a) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_lt_4_strided_a) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_gt_4_strided_a) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_div_4_strided_a) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .a_stride(43)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_a) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_a) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(43)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_div_8) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_1X8__WASMSIMD_SPLAT_X86, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4_strided_a) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_lt_4_strided_a) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_gt_4_strided_a) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_div_4_strided_a) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(43)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_a) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_div_8) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_a) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_RELU_4X8__WASMSIMD_SPLAT_X86, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
#if XNN_ARCH_WASM
TEST(F32_GEMM_RELU_1X4__WASM, k_eq_1) {
GemmMicrokernelTester()
diff --git a/test/f32-gemm-relu.yaml b/test/f32-gemm-relu.yaml
index 41b94c7..3cd8a35 100644
--- a/test/f32-gemm-relu.yaml
+++ b/test/f32-gemm-relu.yaml
@@ -2,6 +2,14 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+- name: xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_arm
+ k-block: 4
+- name: xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat_arm
+ k-block: 4
+- name: xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat_x86
+ k-block: 4
+- name: xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat_x86
+ k-block: 4
- name: xnn_f32_gemm_relu_ukernel_1x4__wasm
k-block: 1
- name: xnn_f32_gemm_relu_ukernel_2x4__wasm
diff --git a/test/f32-gemm.cc b/test/f32-gemm.cc
index a0650b4..ceed6d2 100644
--- a/test/f32-gemm.cc
+++ b/test/f32-gemm.cc
@@ -425,6 +425,1215 @@
#endif // XNN_ARCH_ARM && XNN_ENABLE_ASSEMBLY
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_eq_4_strided_a) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_lt_4_strided_a) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_gt_4_strided_a) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_div_4_strided_a) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .a_stride(43)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, n_gt_8) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, n_gt_8_strided_cn) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, n_gt_8_strided_a) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, n_div_8) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)  // was .n(8): loop variable n was unused, so multiples of nr were never tested
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, n_div_8_strided_a) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_1X8__WASMSIMD_SPLAT, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_gemm_ukernel_1x8__wasmsimd_splat);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_eq_4_strided_a) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_lt_4_strided_a) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_gt_4_strided_a) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_div_4_strided_a) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .a_stride(43)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, n_gt_8) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, n_gt_8_strided_cn) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, n_gt_8_strided_a) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, n_div_8) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)  // was .n(8): loop variable n was unused, so multiples of nr were never tested
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, n_div_8_strided_a) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_4X8__WASMSIMD_SPLAT, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_gemm_ukernel_4x8__wasmsimd_splat);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_eq_4_strided_a) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_lt_4_strided_a) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .a_stride(7)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_gt_4_strided_a) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .a_stride(11)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_div_4_strided_a) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .a_stride(43)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, n_gt_8) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, n_gt_8_strided_cn) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, n_gt_8_strided_a) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, n_div_8) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)  // was .n(8): loop variable n was unused, so multiples of nr were never tested
+ .k(k)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, n_div_8_strided_a) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .a_stride(23)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_GEMM_5X8__WASMSIMD_SPLAT, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_gemm_ukernel_5x8__wasmsimd_splat);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
#if XNN_ARCH_WASM
TEST(F32_GEMM_1X4__WASM, k_eq_1) {
GemmMicrokernelTester()
diff --git a/test/f32-gemm.yaml b/test/f32-gemm.yaml
index 102f9d7..44d0810 100644
--- a/test/f32-gemm.yaml
+++ b/test/f32-gemm.yaml
@@ -5,6 +5,12 @@
- name: xnn_f32_gemm_ukernel_4x4__aarch32_vfp_ld64
k-block: 2
assembly: true
+- name: xnn_f32_gemm_ukernel_1x8__wasmsimd_splat
+ k-block: 4
+- name: xnn_f32_gemm_ukernel_4x8__wasmsimd_splat
+ k-block: 4
+- name: xnn_f32_gemm_ukernel_5x8__wasmsimd_splat
+ k-block: 4
- name: xnn_f32_gemm_ukernel_1x4__wasm
k-block: 1
- name: xnn_f32_gemm_ukernel_2x4__wasm
diff --git a/test/f32-igemm-relu.cc b/test/f32-igemm-relu.cc
index 44c674b..8d77235 100644
--- a/test/f32-igemm-relu.cc
+++ b/test/f32-igemm-relu.cc
@@ -22,6 +22,1666 @@
#include "gemm-microkernel-tester.h"
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)  // was .n(8): loop variable n was unused, so multiples of nr were never tested
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, small_kernel) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, small_kernel_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_gt_8_small_kernel) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)  // was .n(8): loop variable n was unused, so n > nr was never tested
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, n_div_8_small_kernel) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)  // was .n(8): loop variable n was unused, so multiples of nr were never tested
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, a_offset) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, zero) {
+ for (uint32_t mz = 0; mz < 1; mz++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_gt_8) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_strided_cn) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_div_8) {
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)  // was .n(8): loop variable n was dead, multiples of nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, small_kernel) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, small_kernel_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_gt_8_small_kernel) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, n_div_8_small_kernel) {
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(5)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(5)
+          .n(n)  // was .n(8): loop variable n was dead, multiples of nr never exercised
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, a_offset) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(103)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, zero) {
+ for (uint32_t mz = 0; mz < 5; mz++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(103)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_5X8__WASMSIMD_SPLAT_ARM, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_div_8) {
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, multiples of nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, small_kernel) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, small_kernel_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_gt_8_small_kernel) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, n_div_8_small_kernel) {
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, multiples of nr never exercised
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, a_offset) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, zero) {
+ for (uint32_t mz = 0; mz < 1; mz++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_1X8__WASMSIMD_SPLAT_X86, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_gt_8) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_gt_8_strided_cn) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_div_8) {
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was dead, multiples of nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, small_kernel) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, small_kernel_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_gt_8_small_kernel) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, n_div_8_small_kernel) {
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was dead, multiples of nr never exercised
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, a_offset) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(83)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, zero) {
+ for (uint32_t mz = 0; mz < 4; mz++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(83)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_RELU_4X8__WASMSIMD_SPLAT_X86, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
#if XNN_ARCH_WASM
TEST(F32_IGEMM_RELU_1X4__WASM, k_eq_1) {
GemmMicrokernelTester()
diff --git a/test/f32-igemm-relu.yaml b/test/f32-igemm-relu.yaml
index 6600b06..73ecd41 100644
--- a/test/f32-igemm-relu.yaml
+++ b/test/f32-igemm-relu.yaml
@@ -2,6 +2,14 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+- name: xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_arm
+ k-block: 4
+- name: xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat_arm
+ k-block: 4
+- name: xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat_x86
+ k-block: 4
+- name: xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat_x86
+ k-block: 4
- name: xnn_f32_igemm_relu_ukernel_1x4__wasm
k-block: 1
- name: xnn_f32_igemm_relu_ukernel_2x4__wasm
diff --git a/test/f32-igemm.cc b/test/f32-igemm.cc
index 2b3f9e0..c10ebfa 100644
--- a/test/f32-igemm.cc
+++ b/test/f32-igemm.cc
@@ -22,6 +22,1251 @@
#include "gemm-microkernel-tester.h"
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, n_gt_8) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, n_gt_8_strided_cn) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .cn_stride(11)
+          .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, n_gt_8_subtile) {
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, n_div_8) {
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, multiples of nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, n_div_8_strided_cn) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, n_div_8_subtile) {
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, small_kernel) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, small_kernel_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, n_gt_8_small_kernel) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+      }
+    }
+  }
+
+  TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, n_div_8_small_kernel) {
+    for (uint32_t n = 16; n <= 24; n += 8) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(1)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(1)
+          .n(n)  // was .n(8): loop variable n was dead, multiples of nr never exercised
+          .k(k)
+          .ks(3)
+          .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, strided_cm_subtile) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 1; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, a_offset) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, zero) {
+ for (uint32_t mz = 0; mz < 1; mz++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(23)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_1X8__WASMSIMD_SPLAT, strided_cm) {
+ GemmMicrokernelTester()
+ .mr(1)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(1)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_ukernel_1x8__wasmsimd_splat);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_eq_4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, strided_cn) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_eq_4_subtile) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_eq_4_subtile_m) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_eq_4_subtile_n) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_lt_4) {
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_lt_4_subtile) {
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_gt_4) {
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_gt_4_subtile) {
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_div_4) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, k_div_4_subtile) {
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+  TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, n_gt_8) {
+    for (uint32_t n = 9; n < 16; n++) {
+      for (size_t k = 1; k <= 20; k += 5) {
+        GemmMicrokernelTester()
+          .mr(4)
+          .nr(8)
+          .kr(1)
+          .sr(1)
+          .m(4)
+          .n(n)  // was .n(8): loop variable n was dead, n > nr never exercised
+          .k(k)
+          .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+      }
+    }
+  }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, n_gt_8_strided_cn) {  // N above NR with a non-unit cn stride between output column blocks.
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)  // was .n(8): loop variable unused; matches the n_div_8_strided_cn sibling which uses .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, n_gt_8_subtile) {  // N above NR across every m subtile: partial-N output masking.
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, n_div_8) {  // N a multiple of NR: multiple full-width output column blocks.
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)  // was .n(8): the loop variable was unused, so multi-block N was never actually tested
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, n_div_8_strided_cn) {  // NR-multiple N with a non-unit cn stride between column blocks.
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, n_div_8_subtile) {  // NR-multiple N across every m subtile.
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, small_kernel) {  // ks = 3: indirect GEMM iterates over 3 sets of A pointers.
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, small_kernel_subtile) {  // ks = 3 indirection combined with every (m, n) subtile.
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, n_gt_8_small_kernel) {  // Partial-N output path combined with ks = 3 indirection.
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)  // was .n(8): the loop variable was unused, so n > 8 was never actually tested
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, n_div_8_small_kernel) {  // NR-multiple N combined with ks = 3 indirection.
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(n)  // was .n(8): the loop variable was unused, so multi-block N was never actually tested
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, strided_cm_subtile) {  // Non-unit cm stride between output rows, across all subtiles.
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 4; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, a_offset) {  // Non-zero a_offset: indirection pointers are taken relative to a base address.
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(83)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, zero) {  // Redirects each row in turn to the zero buffer via zero_index.
+ for (uint32_t mz = 0; mz < 4; mz++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(83)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_4X8__WASMSIMD_SPLAT, strided_cm) {  // Non-unit cm stride between output rows.
+ GemmMicrokernelTester()
+ .mr(4)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(4)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_ukernel_4x8__wasmsimd_splat);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
+#if XNN_ARCH_WASMSIMD
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_eq_4) {  // K exactly one K-block: the simplest full-tile case.
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, strided_cn) {  // Non-unit cn stride between output column blocks.
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_eq_4_subtile) {  // One K-block across every (m, n) subtile of the 5x8 tile.
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_eq_4_subtile_m) {  // One K-block, varying m only (full-width N).
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(8)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_eq_4_subtile_n) {  // One K-block, varying n only (full-height M).
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(4)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_lt_4) {  // K below the kernel's K-block of 4: exercises the K-remainder path.
+ for (size_t k = 1; k < 4; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_lt_4_subtile) {  // K-remainder path across every (m, n) subtile.
+ for (size_t k = 1; k < 4; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_gt_4) {  // K between one and two K-blocks: main loop plus K-remainder.
+ for (size_t k = 5; k < 8; k++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_gt_4_subtile) {  // Main loop + K-remainder, across every (m, n) subtile.
+ for (size_t k = 5; k < 8; k++) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_div_4) {  // K an exact multiple of the K-block: no remainder iterations.
+ for (size_t k = 8; k <= 40; k += 4) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, k_div_4_subtile) {  // K-block-aligned K, across every (m, n) subtile.
+ for (size_t k = 8; k <= 40; k += 4) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, n_gt_8) {  // N above the 8-wide N-block: exercises the partial-N output path.
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)  // was .n(8): the loop variable was unused, so n > 8 was never actually tested
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, n_gt_8_strided_cn) {  // N above NR with a non-unit cn stride between output column blocks.
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)  // was .n(8): loop variable unused; matches the n_div_8_strided_cn sibling which uses .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, n_gt_8_subtile) {  // N above NR across every m subtile: partial-N output masking.
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, n_div_8) {  // N a multiple of NR: multiple full-width output column blocks.
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)  // was .n(8): the loop variable was unused, so multi-block N was never actually tested
+ .k(k)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, n_div_8_strided_cn) {  // NR-multiple N with a non-unit cn stride between column blocks.
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)
+ .k(k)
+ .cn_stride(11)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, n_div_8_subtile) {  // NR-multiple N across every m subtile.
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, small_kernel) {  // ks = 3: indirect GEMM iterates over 3 sets of A pointers.
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, small_kernel_subtile) {  // ks = 3 indirection combined with every (m, n) subtile.
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .ks(3)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, n_gt_8_small_kernel) {  // Partial-N output path combined with ks = 3 indirection.
+ for (uint32_t n = 9; n < 16; n++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)  // was .n(8): the loop variable was unused, so n > 8 was never actually tested
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, n_div_8_small_kernel) {  // NR-multiple N combined with ks = 3 indirection.
+ for (uint32_t n = 16; n <= 24; n += 8) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(n)  // was .n(8): the loop variable was unused, so multi-block N was never actually tested
+ .k(k)
+ .ks(3)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, strided_cm_subtile) {  // Non-unit cm stride between output rows, across all subtiles.
+ for (size_t k = 1; k <= 20; k += 5) {
+ for (uint32_t m = 1; m <= 5; m++) {
+ for (uint32_t n = 1; n <= 8; n++) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(m)
+ .n(n)
+ .k(k)
+ .cm_stride(11)
+ .iterations(1)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, a_offset) {  // Non-zero a_offset: indirection pointers are taken relative to a base address.
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(103)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, zero) {  // Redirects each row in turn to the zero buffer via zero_index.
+ for (uint32_t mz = 0; mz < 5; mz++) {
+ for (size_t k = 1; k <= 20; k += 5) {
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(k)
+ .ks(3)
+ .a_offset(103)
+ .zero_index(mz)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+ }
+ }
+
+ TEST(F32_IGEMM_5X8__WASMSIMD_SPLAT, strided_cm) {  // Non-unit cm stride between output rows.
+ GemmMicrokernelTester()
+ .mr(5)
+ .nr(8)
+ .kr(1)
+ .sr(1)
+ .m(5)
+ .n(8)
+ .k(4)
+ .cm_stride(11)
+ .Test(xnn_f32_igemm_ukernel_5x8__wasmsimd_splat);
+ }
+#endif // XNN_ARCH_WASMSIMD
+
+
#if XNN_ARCH_WASM
TEST(F32_IGEMM_1X4__WASM, k_eq_1) {
GemmMicrokernelTester()
diff --git a/test/f32-igemm.yaml b/test/f32-igemm.yaml
index 775a4f9..75f41ba 100644
--- a/test/f32-igemm.yaml
+++ b/test/f32-igemm.yaml
@@ -2,6 +2,12 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+- name: xnn_f32_igemm_ukernel_1x8__wasmsimd_splat
+ k-block: 4
+- name: xnn_f32_igemm_ukernel_4x8__wasmsimd_splat
+ k-block: 4
+- name: xnn_f32_igemm_ukernel_5x8__wasmsimd_splat
+ k-block: 4
- name: xnn_f32_igemm_ukernel_1x4__wasm
k-block: 1
- name: xnn_f32_igemm_ukernel_2x4__wasm